2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # ChaCha20 for x86_64.
23 # Add AVX512F code path.
25 # Performance in cycles per byte out of large buffer.
#                IALU/gcc 4.8(i)  1xSSSE3/SSE2    4xSSSE3     8xAVX2
#
# P4             9.48/+99%        -/22.7(ii)      -
# Core2          7.83/+55%        7.90/8.08       4.35
# Westmere       7.19/+50%        5.60/6.70       3.00
# Sandy Bridge   8.31/+42%        5.45/6.76       2.72
# Ivy Bridge     6.71/+46%        5.40/6.49       2.41
# Haswell        5.92/+43%        5.20/6.45       2.42        1.23
# Skylake        5.87/+39%        4.70/-          2.31        1.19
# Silvermont     12.0/+33%        7.75/7.40       7.03(iii)
# Goldmont       10.6/+17%        5.10/-          3.28
# Sledgehammer   7.28/+52%        -/14.2(ii)      -
# Bulldozer      9.66/+28%        9.85/11.1       3.06(iv)
# VIA Nano       10.5/+46%        6.72/8.60       6.05
# (i)   compared to older gcc 3.x, one can observe >2x improvement on
# (ii)  as can be seen, SSE2 performance is too low on legacy
#       processors; NxSSE2 results are naturally better, but not
#       impressively better than the IALU ones, which is why you won't
#       find SSE2 code below;
# (iii) this is not an optimal result for Atom because of MSROM
#       limitations; SSE2 can do better, but the gain is considered too
#       low to justify the [maintenance] effort;
51 # (iv) Bulldozer actually executes 4xXOP code path that delivers 2.20;
55 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
57 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
59 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
60 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
61 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
62 die "can't locate x86_64-xlate.pl";
64 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
65 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
66 $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
69 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
70 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
71 $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
72 $avx += 1 if ($1==2.11 && $2>=8);
75 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
76 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
77 $avx = ($1>=10) + ($1>=11);
80 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
81 $avx = ($2>=3.0) + ($2>3.0);
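# $avx summarizes how capable the assembler is: non-zero allows the
# AVX/XOP code paths to be emitted, >1 additionally allows AVX2, and >2
# allows AVX512F (see the "$avx", "$avx>1" and "$avx>2" guards below).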
84 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
87 # input parameter block
88 ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
93 .extern OPENSSL_ia32cap_P
105 .long 0,2,4,6,1,3,5,7
107 .long 8,8,8,8,8,8,8,8
109 .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
111 .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
113 .asciz "expand 32-byte k"
116 .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
118 .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
120 .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
122 .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
123 .asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
126 sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
127 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
129 $arg = "\$$arg" if ($arg*1 eq $arg);
130 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
133 @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
134 "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
137 sub ROUND { # critical path is 24 cycles per round
138 my ($a0,$b0,$c0,$d0)=@_;
139 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
140 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
141 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
142 my ($xc,$xc_)=map("\"$_\"",@t);
143 my @x=map("\"$_\"",@x);
145 # Consider order in which variables are addressed by their
150 # 0 4 8 12 < even round
154 # 0 5 10 15 < odd round
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while 'c's are maintained in memory. If
# you observe the 'c' column, you'll notice that a pair of 'c's
# stays invariant between rounds. This means that we have to
# reload them only once per round, in the middle. This is why
# you'll see a bunch of 'c' stores and loads in the middle, but
# none at the beginning or end.
# Normally instructions would be interleaved to favour in-order
# execution. Generally out-of-order cores manage it gracefully,
# but not this time, for some reason. As in-order execution
# cores are a dying breed, old Atom being the only one around,
# the instructions are left uninterleaved. Besides, Atom is
# better off executing the 1xSSSE3 code anyway... (A plain-Perl
# reference model of the quarter-round is given just before the
# generic code path below.)
175 "&add (@x[$a0],@x[$b0])", # Q1
176 "&xor (@x[$d0],@x[$a0])",
178 "&add (@x[$a1],@x[$b1])", # Q2
179 "&xor (@x[$d1],@x[$a1])",
182 "&add ($xc,@x[$d0])",
183 "&xor (@x[$b0],$xc)",
185 "&add ($xc_,@x[$d1])",
186 "&xor (@x[$b1],$xc_)",
189 "&add (@x[$a0],@x[$b0])",
190 "&xor (@x[$d0],@x[$a0])",
192 "&add (@x[$a1],@x[$b1])",
193 "&xor (@x[$d1],@x[$a1])",
196 "&add ($xc,@x[$d0])",
197 "&xor (@x[$b0],$xc)",
199 "&add ($xc_,@x[$d1])",
200 "&xor (@x[$b1],$xc_)",
203 "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
204 "&mov (\"4*$c1(%rsp)\",$xc_)",
205 "&mov ($xc,\"4*$c2(%rsp)\")",
206 "&mov ($xc_,\"4*$c3(%rsp)\")",
208 "&add (@x[$a2],@x[$b2])", # Q3
209 "&xor (@x[$d2],@x[$a2])",
211 "&add (@x[$a3],@x[$b3])", # Q4
212 "&xor (@x[$d3],@x[$a3])",
215 "&add ($xc,@x[$d2])",
216 "&xor (@x[$b2],$xc)",
218 "&add ($xc_,@x[$d3])",
219 "&xor (@x[$b3],$xc_)",
222 "&add (@x[$a2],@x[$b2])",
223 "&xor (@x[$d2],@x[$a2])",
225 "&add (@x[$a3],@x[$b3])",
226 "&xor (@x[$d3],@x[$a3])",
229 "&add ($xc,@x[$d2])",
230 "&xor (@x[$b2],$xc)",
232 "&add ($xc_,@x[$d3])",
233 "&xor (@x[$b3],$xc_)",
238 ########################################################################
239 # Generic code path that handles all lengths on pre-SSSE3 processors.
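#
# For reference, the scalar quarter-round that ROUND() above emits four
# times per call (and that the IALU code below computes) is the standard
# ChaCha20 quarter-round.  The helper below is an illustrative sketch
# only: its name is made up, it is never called by the generator, and it
# merely documents the intended arithmetic.
sub chacha_quarter_round_ref {		# illustration only, never called
my ($a,$b,$c,$d)=@_;
my $rotl = sub { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n)))&0xffffffff; };
	$a=($a+$b)&0xffffffff; $d=$rotl->($d^$a,16);
	$c=($c+$d)&0xffffffff; $b=$rotl->($b^$c,12);
	$a=($a+$b)&0xffffffff; $d=$rotl->($d^$a,8);
	$c=($c+$d)&0xffffffff; $b=$rotl->($b^$c,7);
	($a,$b,$c,$d);
}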
241 .globl ChaCha20_ctr32
242 .type ChaCha20_ctr32,\@function,5
247 mov OPENSSL_ia32cap_P+4(%rip),%r10
249 $code.=<<___ if ($avx>2);
250 bt \$48,%r10 # check for AVX512F
test \$`1<<(41-32)`,%r10d # check for SSSE3
266 #movdqa .Lsigma(%rip),%xmm0
268 movdqu 16($key),%xmm2
269 movdqu ($counter),%xmm3
270 movdqa .Lone(%rip),%xmm4
272 #movdqa %xmm0,4*0(%rsp) # key[0]
273 movdqa %xmm1,4*4(%rsp) # key[1]
274 movdqa %xmm2,4*8(%rsp) # key[2]
275 movdqa %xmm3,4*12(%rsp) # key[3]
276 mov $len,%rbp # reassign $len
281 mov \$0x61707865,@x[0] # 'expa'
282 mov \$0x3320646e,@x[1] # 'nd 3'
283 mov \$0x79622d32,@x[2] # '2-by'
284 mov \$0x6b206574,@x[3] # 'te k'
290 mov 4*13(%rsp),@x[13]
291 mov 4*14(%rsp),@x[14]
292 mov 4*15(%rsp),@x[15]
294 mov %rbp,64+0(%rsp) # save len
296 mov $inp,64+8(%rsp) # save inp
297 movq %xmm2,%rsi # "@x[8]"
298 mov $out,64+16(%rsp) # save out
300 shr \$32,%rdi # "@x[9]"
306 foreach (&ROUND (0, 4, 8,12)) { eval; }
307 foreach (&ROUND (0, 5,10,15)) { eval; }
312 mov @t[1],4*9(%rsp) # modulo-scheduled
314 mov 64(%rsp),%rbp # load len
316 mov 64+8(%rsp),$inp # load inp
317 paddd %xmm4,%xmm3 # increment counter
318 mov 64+16(%rsp),$out # load out
320 add \$0x61707865,@x[0] # 'expa'
321 add \$0x3320646e,@x[1] # 'nd 3'
322 add \$0x79622d32,@x[2] # '2-by'
323 add \$0x6b206574,@x[3] # 'te k'
328 add 4*12(%rsp),@x[12]
329 add 4*13(%rsp),@x[13]
330 add 4*14(%rsp),@x[14]
331 add 4*15(%rsp),@x[15]
332 paddd 4*8(%rsp),%xmm1
337 xor 4*0($inp),@x[0] # xor with input
345 movdqu 4*8($inp),%xmm0
346 xor 4*12($inp),@x[12]
347 xor 4*13($inp),@x[13]
348 xor 4*14($inp),@x[14]
349 xor 4*15($inp),@x[15]
350 lea 4*16($inp),$inp # inp+=64
353 movdqa %xmm2,4*8(%rsp)
354 movd %xmm3,4*12(%rsp)
356 mov @x[0],4*0($out) # write output
364 movdqu %xmm0,4*8($out)
365 mov @x[12],4*12($out)
366 mov @x[13],4*13($out)
367 mov @x[14],4*14($out)
368 mov @x[15],4*15($out)
369 lea 4*16($out),$out # out+=64
387 movdqa %xmm1,4*8(%rsp)
388 mov @x[12],4*12(%rsp)
389 mov @x[13],4*13(%rsp)
390 mov @x[14],4*14(%rsp)
391 mov @x[15],4*15(%rsp)
394 movzb ($inp,%rbx),%eax
395 movzb (%rsp,%rbx),%edx
398 mov %al,-1($out,%rbx)
403 lea 64+24+48(%rsp),%rsi
413 .size ChaCha20_ctr32,.-ChaCha20_ctr32
416 ########################################################################
417 # SSSE3 code path that handles shorter lengths
419 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
421 sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
445 my $xframe = $win64 ? 32+8 : 8;
448 .type ChaCha20_ssse3,\@function,5
452 mov %rsp,%r9 # frame pointer
454 $code.=<<___ if ($avx);
455 test \$`1<<(43-32)`,%r10d
456 jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
459 cmp \$128,$len # we might throw away some data,
460 ja .LChaCha20_4x # but overall it won't be slower
463 sub \$64+$xframe,%rsp
465 $code.=<<___ if ($win64);
466 movaps %xmm6,-0x28(%r9)
467 movaps %xmm7,-0x18(%r9)
471 movdqa .Lsigma(%rip),$a
475 movdqa .Lrot16(%rip),$rot16
476 movdqa .Lrot24(%rip),$rot24
482 mov \$10,$counter # reuse $counter
487 movdqa .Lone(%rip),$d
500 &pshufd ($c,$c,0b01001110);
501 &pshufd ($b,$b,0b00111001);
502 &pshufd ($d,$d,0b10010011);
506 &pshufd ($c,$c,0b01001110);
507 &pshufd ($b,$b,0b10010011);
508 &pshufd ($d,$d,0b00111001);
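# The two pshufd triplets above essentially rotate rows 'b', 'c' and 'd'
# of the state, so that the second SSSE3ROUND application works on the
# diagonals of the ChaCha matrix, and then rotate the rows back into
# column order before the next iteration.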
511 &jnz (".Loop_ssse3");
523 movdqu 0x10($inp),$t1
524 pxor $t,$a # xor with input
527 movdqu 0x30($inp),$t1
528 lea 0x40($inp),$inp # inp+=64
532 movdqu $a,0x00($out) # write output
536 lea 0x40($out),$out # out+=64
539 jnz .Loop_outer_ssse3
549 xor $counter,$counter
552 movzb ($inp,$counter),%eax
553 movzb (%rsp,$counter),%ecx
554 lea 1($counter),$counter
556 mov %al,-1($out,$counter)
562 $code.=<<___ if ($win64);
563 movaps -0x28(%r9),%xmm6
564 movaps -0x18(%r9),%xmm7
570 .size ChaCha20_ssse3,.-ChaCha20_ssse3
574 ########################################################################
575 # SSSE3 code path that handles longer messages.
577 # assign variables to favor Atom front-end
578 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
579 $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
580 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
581 "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
583 sub SSSE3_lane_ROUND {
584 my ($a0,$b0,$c0,$d0)=@_;
585 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
586 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
587 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
588 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
589 my @x=map("\"$_\"",@xx);
591 # Consider order in which variables are addressed by their
596 # 0 4 8 12 < even round
600 # 0 5 10 15 < odd round
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while 'c's are maintained in memory. If
# you observe the 'c' column, you'll notice that a pair of 'c's
# stays invariant between rounds. This means that we have to
# reload them only once per round, in the middle. This is why
# you'll see a bunch of 'c' stores and loads in the middle, but
# none at the beginning or end.
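#
# In this 4x code path each xmm register carries the same state word for
# four independent 64-byte blocks (the key material is "smashed by lanes"
# below), so one pass over the round function advances four blocks at
# once. The 16- and 8-bit rotations are done with pshufb against the
# .Lrot16/.Lrot24 tables (addressed via %r10/%r11), while the 12- and
# 7-bit rotations are composed from shifts and por.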
614 "&paddd (@x[$a0],@x[$b0])", # Q1
615 "&paddd (@x[$a1],@x[$b1])", # Q2
616 "&pxor (@x[$d0],@x[$a0])",
617 "&pxor (@x[$d1],@x[$a1])",
618 "&pshufb (@x[$d0],$t1)",
619 "&pshufb (@x[$d1],$t1)",
621 "&paddd ($xc,@x[$d0])",
622 "&paddd ($xc_,@x[$d1])",
623 "&pxor (@x[$b0],$xc)",
624 "&pxor (@x[$b1],$xc_)",
625 "&movdqa ($t0,@x[$b0])",
626 "&pslld (@x[$b0],12)",
628 "&movdqa ($t1,@x[$b1])",
629 "&pslld (@x[$b1],12)",
630 "&por (@x[$b0],$t0)",
632 "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
633 "&por (@x[$b1],$t1)",
635 "&paddd (@x[$a0],@x[$b0])",
636 "&paddd (@x[$a1],@x[$b1])",
637 "&pxor (@x[$d0],@x[$a0])",
638 "&pxor (@x[$d1],@x[$a1])",
639 "&pshufb (@x[$d0],$t0)",
640 "&pshufb (@x[$d1],$t0)",
642 "&paddd ($xc,@x[$d0])",
643 "&paddd ($xc_,@x[$d1])",
644 "&pxor (@x[$b0],$xc)",
645 "&pxor (@x[$b1],$xc_)",
646 "&movdqa ($t1,@x[$b0])",
647 "&pslld (@x[$b0],7)",
649 "&movdqa ($t0,@x[$b1])",
650 "&pslld (@x[$b1],7)",
651 "&por (@x[$b0],$t1)",
653 "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
654 "&por (@x[$b1],$t0)",
656 "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
657 "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
658 "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
659 "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
661 "&paddd (@x[$a2],@x[$b2])", # Q3
662 "&paddd (@x[$a3],@x[$b3])", # Q4
663 "&pxor (@x[$d2],@x[$a2])",
664 "&pxor (@x[$d3],@x[$a3])",
665 "&pshufb (@x[$d2],$t1)",
666 "&pshufb (@x[$d3],$t1)",
668 "&paddd ($xc,@x[$d2])",
669 "&paddd ($xc_,@x[$d3])",
670 "&pxor (@x[$b2],$xc)",
671 "&pxor (@x[$b3],$xc_)",
672 "&movdqa ($t0,@x[$b2])",
673 "&pslld (@x[$b2],12)",
675 "&movdqa ($t1,@x[$b3])",
676 "&pslld (@x[$b3],12)",
677 "&por (@x[$b2],$t0)",
679 "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
680 "&por (@x[$b3],$t1)",
682 "&paddd (@x[$a2],@x[$b2])",
683 "&paddd (@x[$a3],@x[$b3])",
684 "&pxor (@x[$d2],@x[$a2])",
685 "&pxor (@x[$d3],@x[$a3])",
686 "&pshufb (@x[$d2],$t0)",
687 "&pshufb (@x[$d3],$t0)",
689 "&paddd ($xc,@x[$d2])",
690 "&paddd ($xc_,@x[$d3])",
691 "&pxor (@x[$b2],$xc)",
692 "&pxor (@x[$b3],$xc_)",
693 "&movdqa ($t1,@x[$b2])",
694 "&pslld (@x[$b2],7)",
696 "&movdqa ($t0,@x[$b3])",
697 "&pslld (@x[$b3],7)",
698 "&por (@x[$b2],$t1)",
700 "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
705 my $xframe = $win64 ? 0xa8 : 8;
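# The output stage of ChaCha20_4x below "de-interlaces" the data with
# punpck* sequences; conceptually this is a 4x4 transpose of 32-bit
# words, turning "one state word across four blocks per register" into
# "four consecutive words of one block per register".  An illustrative
# (and hypothetical, never called) Perl model of that step:
sub lane_transpose4x4_ref {		# illustration only, never called
my @rows=@_;				# four refs to four-element dword arrays
	map { my $i=$_; [ map { $_->[$i] } @rows ] } (0..3);
}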
708 .type ChaCha20_4x,\@function,5
712 mov %rsp,%r9 # frame pointer
715 $code.=<<___ if ($avx>1);
716 shr \$32,%r10 # OPENSSL_ia32cap_P+8
717 test \$`1<<5`,%r10 # test AVX2
724 and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
725 cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
726 je .Ldo_sse3_after_all # to detect Atom
729 sub \$0x140+$xframe,%rsp
731 ################ stack layout
732 # +0x00 SIMD equivalent of @x[8-12]
734 # +0x40 constant copy of key[0-2] smashed by lanes
736 # +0x100 SIMD counters (with nonce smashed by lanes)
739 $code.=<<___ if ($win64);
740 movaps %xmm6,-0xa8(%r9)
741 movaps %xmm7,-0x98(%r9)
742 movaps %xmm8,-0x88(%r9)
743 movaps %xmm9,-0x78(%r9)
744 movaps %xmm10,-0x68(%r9)
745 movaps %xmm11,-0x58(%r9)
746 movaps %xmm12,-0x48(%r9)
747 movaps %xmm13,-0x38(%r9)
748 movaps %xmm14,-0x28(%r9)
749 movaps %xmm15,-0x18(%r9)
753 movdqa .Lsigma(%rip),$xa3 # key[0]
754 movdqu ($key),$xb3 # key[1]
755 movdqu 16($key),$xt3 # key[2]
756 movdqu ($counter),$xd3 # key[3]
757 lea 0x100(%rsp),%rcx # size optimization
758 lea .Lrot16(%rip),%r10
759 lea .Lrot24(%rip),%r11
761 pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
762 pshufd \$0x55,$xa3,$xa1
763 movdqa $xa0,0x40(%rsp) # ... and offload
764 pshufd \$0xaa,$xa3,$xa2
765 movdqa $xa1,0x50(%rsp)
766 pshufd \$0xff,$xa3,$xa3
767 movdqa $xa2,0x60(%rsp)
768 movdqa $xa3,0x70(%rsp)
770 pshufd \$0x00,$xb3,$xb0
771 pshufd \$0x55,$xb3,$xb1
772 movdqa $xb0,0x80-0x100(%rcx)
773 pshufd \$0xaa,$xb3,$xb2
774 movdqa $xb1,0x90-0x100(%rcx)
775 pshufd \$0xff,$xb3,$xb3
776 movdqa $xb2,0xa0-0x100(%rcx)
777 movdqa $xb3,0xb0-0x100(%rcx)
779 pshufd \$0x00,$xt3,$xt0 # "$xc0"
780 pshufd \$0x55,$xt3,$xt1 # "$xc1"
781 movdqa $xt0,0xc0-0x100(%rcx)
782 pshufd \$0xaa,$xt3,$xt2 # "$xc2"
783 movdqa $xt1,0xd0-0x100(%rcx)
784 pshufd \$0xff,$xt3,$xt3 # "$xc3"
785 movdqa $xt2,0xe0-0x100(%rcx)
786 movdqa $xt3,0xf0-0x100(%rcx)
788 pshufd \$0x00,$xd3,$xd0
789 pshufd \$0x55,$xd3,$xd1
790 paddd .Linc(%rip),$xd0 # don't save counters yet
791 pshufd \$0xaa,$xd3,$xd2
792 movdqa $xd1,0x110-0x100(%rcx)
793 pshufd \$0xff,$xd3,$xd3
794 movdqa $xd2,0x120-0x100(%rcx)
795 movdqa $xd3,0x130-0x100(%rcx)
801 movdqa 0x40(%rsp),$xa0 # re-load smashed key
802 movdqa 0x50(%rsp),$xa1
803 movdqa 0x60(%rsp),$xa2
804 movdqa 0x70(%rsp),$xa3
805 movdqa 0x80-0x100(%rcx),$xb0
806 movdqa 0x90-0x100(%rcx),$xb1
807 movdqa 0xa0-0x100(%rcx),$xb2
808 movdqa 0xb0-0x100(%rcx),$xb3
809 movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
810 movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
811 movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
812 movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
813 movdqa 0x100-0x100(%rcx),$xd0
814 movdqa 0x110-0x100(%rcx),$xd1
815 movdqa 0x120-0x100(%rcx),$xd2
816 movdqa 0x130-0x100(%rcx),$xd3
817 paddd .Lfour(%rip),$xd0 # next SIMD counters
820 movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
821 movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
822 movdqa (%r10),$xt3 # .Lrot16(%rip)
824 movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
830 foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
831 foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
836 paddd 0x40(%rsp),$xa0 # accumulate key material
837 paddd 0x50(%rsp),$xa1
838 paddd 0x60(%rsp),$xa2
839 paddd 0x70(%rsp),$xa3
841 movdqa $xa0,$xt2 # "de-interlace" data
848 punpcklqdq $xa2,$xa0 # "a0"
850 punpcklqdq $xt3,$xt2 # "a2"
851 punpckhqdq $xa2,$xa1 # "a1"
852 punpckhqdq $xt3,$xa3 # "a3"
854 ($xa2,$xt2)=($xt2,$xa2);
856 paddd 0x80-0x100(%rcx),$xb0
857 paddd 0x90-0x100(%rcx),$xb1
858 paddd 0xa0-0x100(%rcx),$xb2
859 paddd 0xb0-0x100(%rcx),$xb3
861 movdqa $xa0,0x00(%rsp) # offload $xaN
862 movdqa $xa1,0x10(%rsp)
863 movdqa 0x20(%rsp),$xa0 # "xc2"
864 movdqa 0x30(%rsp),$xa1 # "xc3"
873 punpcklqdq $xb2,$xb0 # "b0"
875 punpcklqdq $xt3,$xt2 # "b2"
876 punpckhqdq $xb2,$xb1 # "b1"
877 punpckhqdq $xt3,$xb3 # "b3"
879 ($xb2,$xt2)=($xt2,$xb2);
880 my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
882 paddd 0xc0-0x100(%rcx),$xc0
883 paddd 0xd0-0x100(%rcx),$xc1
884 paddd 0xe0-0x100(%rcx),$xc2
885 paddd 0xf0-0x100(%rcx),$xc3
887 movdqa $xa2,0x20(%rsp) # keep offloading $xaN
888 movdqa $xa3,0x30(%rsp)
897 punpcklqdq $xc2,$xc0 # "c0"
899 punpcklqdq $xt3,$xt2 # "c2"
900 punpckhqdq $xc2,$xc1 # "c1"
901 punpckhqdq $xt3,$xc3 # "c3"
903 ($xc2,$xt2)=($xt2,$xc2);
904 ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
906 paddd 0x100-0x100(%rcx),$xd0
907 paddd 0x110-0x100(%rcx),$xd1
908 paddd 0x120-0x100(%rcx),$xd2
909 paddd 0x130-0x100(%rcx),$xd3
918 punpcklqdq $xd2,$xd0 # "d0"
920 punpcklqdq $xt3,$xt2 # "d2"
921 punpckhqdq $xd2,$xd1 # "d1"
922 punpckhqdq $xt3,$xd3 # "d3"
924 ($xd2,$xt2)=($xt2,$xd2);
929 movdqu 0x00($inp),$xt0 # xor with input
930 movdqu 0x10($inp),$xt1
931 movdqu 0x20($inp),$xt2
932 movdqu 0x30($inp),$xt3
933 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
938 movdqu $xt0,0x00($out)
939 movdqu 0x40($inp),$xt0
940 movdqu $xt1,0x10($out)
941 movdqu 0x50($inp),$xt1
942 movdqu $xt2,0x20($out)
943 movdqu 0x60($inp),$xt2
944 movdqu $xt3,0x30($out)
945 movdqu 0x70($inp),$xt3
946 lea 0x80($inp),$inp # size optimization
952 movdqu $xt0,0x40($out)
953 movdqu 0x00($inp),$xt0
954 movdqu $xt1,0x50($out)
955 movdqu 0x10($inp),$xt1
956 movdqu $xt2,0x60($out)
957 movdqu 0x20($inp),$xt2
958 movdqu $xt3,0x70($out)
959 lea 0x80($out),$out # size optimization
960 movdqu 0x30($inp),$xt3
966 movdqu $xt0,0x00($out)
967 movdqu 0x40($inp),$xt0
968 movdqu $xt1,0x10($out)
969 movdqu 0x50($inp),$xt1
970 movdqu $xt2,0x20($out)
971 movdqu 0x60($inp),$xt2
972 movdqu $xt3,0x30($out)
973 movdqu 0x70($inp),$xt3
974 lea 0x80($inp),$inp # inp+=64*4
979 movdqu $xt0,0x40($out)
980 movdqu $xt1,0x50($out)
981 movdqu $xt2,0x60($out)
982 movdqu $xt3,0x70($out)
983 lea 0x80($out),$out # out+=64*4
998 #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1000 #movdqa $xt0,0x00(%rsp)
1001 movdqa $xb0,0x10(%rsp)
1002 movdqa $xc0,0x20(%rsp)
1003 movdqa $xd0,0x30(%rsp)
1008 movdqu 0x00($inp),$xt0 # xor with input
1009 movdqu 0x10($inp),$xt1
1010 movdqu 0x20($inp),$xt2
1011 movdqu 0x30($inp),$xt3
pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1016 movdqu $xt0,0x00($out)
1017 movdqu $xt1,0x10($out)
1018 movdqu $xt2,0x20($out)
1019 movdqu $xt3,0x30($out)
1022 movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
1023 lea 0x40($inp),$inp # inp+=64*1
1025 movdqa $xt0,0x00(%rsp)
1026 movdqa $xb1,0x10(%rsp)
1027 lea 0x40($out),$out # out+=64*1
1028 movdqa $xc1,0x20(%rsp)
1029 sub \$64,$len # len-=64*1
1030 movdqa $xd1,0x30(%rsp)
1035 movdqu 0x00($inp),$xt0 # xor with input
1036 movdqu 0x10($inp),$xt1
1037 movdqu 0x20($inp),$xt2
1038 movdqu 0x30($inp),$xt3
1039 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1044 movdqu $xt0,0x00($out)
1045 movdqu 0x40($inp),$xt0
1046 movdqu $xt1,0x10($out)
1047 movdqu 0x50($inp),$xt1
1048 movdqu $xt2,0x20($out)
1049 movdqu 0x60($inp),$xt2
1050 movdqu $xt3,0x30($out)
1051 movdqu 0x70($inp),$xt3
1052 pxor 0x10(%rsp),$xt0
1056 movdqu $xt0,0x40($out)
1057 movdqu $xt1,0x50($out)
1058 movdqu $xt2,0x60($out)
1059 movdqu $xt3,0x70($out)
1062 movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
1063 lea 0x80($inp),$inp # inp+=64*2
1065 movdqa $xt0,0x00(%rsp)
1066 movdqa $xb2,0x10(%rsp)
1067 lea 0x80($out),$out # out+=64*2
1068 movdqa $xc2,0x20(%rsp)
1069 sub \$128,$len # len-=64*2
1070 movdqa $xd2,0x30(%rsp)
1075 movdqu 0x00($inp),$xt0 # xor with input
1076 movdqu 0x10($inp),$xt1
1077 movdqu 0x20($inp),$xt2
1078 movdqu 0x30($inp),$xt3
1079 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1084 movdqu $xt0,0x00($out)
1085 movdqu 0x40($inp),$xt0
1086 movdqu $xt1,0x10($out)
1087 movdqu 0x50($inp),$xt1
1088 movdqu $xt2,0x20($out)
1089 movdqu 0x60($inp),$xt2
1090 movdqu $xt3,0x30($out)
1091 movdqu 0x70($inp),$xt3
1092 lea 0x80($inp),$inp # size optimization
1093 pxor 0x10(%rsp),$xt0
1098 movdqu $xt0,0x40($out)
1099 movdqu 0x00($inp),$xt0
1100 movdqu $xt1,0x50($out)
1101 movdqu 0x10($inp),$xt1
1102 movdqu $xt2,0x60($out)
1103 movdqu 0x20($inp),$xt2
1104 movdqu $xt3,0x70($out)
1105 lea 0x80($out),$out # size optimization
1106 movdqu 0x30($inp),$xt3
1107 pxor 0x20(%rsp),$xt0
1111 movdqu $xt0,0x00($out)
1112 movdqu $xt1,0x10($out)
1113 movdqu $xt2,0x20($out)
1114 movdqu $xt3,0x30($out)
1117 movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
1118 lea 0x40($inp),$inp # inp+=64*3
1120 movdqa $xt0,0x00(%rsp)
1121 movdqa $xb3,0x10(%rsp)
1122 lea 0x40($out),$out # out+=64*3
1123 movdqa $xc3,0x20(%rsp)
1124 sub \$192,$len # len-=64*3
1125 movdqa $xd3,0x30(%rsp)
1128 movzb ($inp,%r10),%eax
1129 movzb (%rsp,%r10),%ecx
1132 mov %al,-1($out,%r10)
1138 $code.=<<___ if ($win64);
1139 movaps -0xa8(%r9),%xmm6
1140 movaps -0x98(%r9),%xmm7
1141 movaps -0x88(%r9),%xmm8
1142 movaps -0x78(%r9),%xmm9
1143 movaps -0x68(%r9),%xmm10
1144 movaps -0x58(%r9),%xmm11
1145 movaps -0x48(%r9),%xmm12
1146 movaps -0x38(%r9),%xmm13
1147 movaps -0x28(%r9),%xmm14
1148 movaps -0x18(%r9),%xmm15
1154 .size ChaCha20_4x,.-ChaCha20_4x
1158 ########################################################################
1159 # XOP code path that handles all lengths.
# There is some "anomaly" observed depending on instruction size or
# alignment. If you look closely at the code below, you'll notice that
# the argument order sometimes varies. The order affects instruction
# encoding by making it larger, and such fiddling gives a 5% performance
# improvement. This is on FX-4100...
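#
# Note also that XOP provides a true vector rotate, vprotd, so every
# rotation in XOP_lane_ROUND below is a single instruction rather than
# the shift-and-or (or pshufb) combinations used by the SSSE3 and AVX2
# code paths.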
1167 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1168 $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
1169 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1170 $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
1172 sub XOP_lane_ROUND {
1173 my ($a0,$b0,$c0,$d0)=@_;
1174 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1175 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1176 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1177 my @x=map("\"$_\"",@xx);
1180 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
1181 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
1182 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
1183 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
1184 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1185 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1186 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1187 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1188 "&vprotd (@x[$d0],@x[$d0],16)",
1189 "&vprotd (@x[$d1],@x[$d1],16)",
1190 "&vprotd (@x[$d2],@x[$d2],16)",
1191 "&vprotd (@x[$d3],@x[$d3],16)",
1193 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
1194 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
1195 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
1196 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
1197 "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
1198 "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
1199 "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
1200 "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
1201 "&vprotd (@x[$b0],@x[$b0],12)",
1202 "&vprotd (@x[$b1],@x[$b1],12)",
1203 "&vprotd (@x[$b2],@x[$b2],12)",
1204 "&vprotd (@x[$b3],@x[$b3],12)",
1206 "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
1207 "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
1208 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
1209 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
1210 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1211 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1212 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1213 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1214 "&vprotd (@x[$d0],@x[$d0],8)",
1215 "&vprotd (@x[$d1],@x[$d1],8)",
1216 "&vprotd (@x[$d2],@x[$d2],8)",
1217 "&vprotd (@x[$d3],@x[$d3],8)",
1219 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
1220 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
1221 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
1222 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
1223 "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
1224 "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
1225 "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
1226 "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
1227 "&vprotd (@x[$b0],@x[$b0],7)",
1228 "&vprotd (@x[$b1],@x[$b1],7)",
1229 "&vprotd (@x[$b2],@x[$b2],7)",
1230 "&vprotd (@x[$b3],@x[$b3],7)"
1234 my $xframe = $win64 ? 0xa8 : 8;
1237 .type ChaCha20_4xop,\@function,5
1241 mov %rsp,%r9 # frame pointer
1242 sub \$0x140+$xframe,%rsp
1244 ################ stack layout
1245 # +0x00 SIMD equivalent of @x[8-12]
1247 # +0x40 constant copy of key[0-2] smashed by lanes
1249 # +0x100 SIMD counters (with nonce smashed by lanes)
1252 $code.=<<___ if ($win64);
1253 movaps %xmm6,-0xa8(%r9)
1254 movaps %xmm7,-0x98(%r9)
1255 movaps %xmm8,-0x88(%r9)
1256 movaps %xmm9,-0x78(%r9)
1257 movaps %xmm10,-0x68(%r9)
1258 movaps %xmm11,-0x58(%r9)
1259 movaps %xmm12,-0x48(%r9)
1260 movaps %xmm13,-0x38(%r9)
1261 movaps %xmm14,-0x28(%r9)
1262 movaps %xmm15,-0x18(%r9)
1268 vmovdqa .Lsigma(%rip),$xa3 # key[0]
1269 vmovdqu ($key),$xb3 # key[1]
1270 vmovdqu 16($key),$xt3 # key[2]
1271 vmovdqu ($counter),$xd3 # key[3]
1272 lea 0x100(%rsp),%rcx # size optimization
1274 vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
1275 vpshufd \$0x55,$xa3,$xa1
1276 vmovdqa $xa0,0x40(%rsp) # ... and offload
1277 vpshufd \$0xaa,$xa3,$xa2
1278 vmovdqa $xa1,0x50(%rsp)
1279 vpshufd \$0xff,$xa3,$xa3
1280 vmovdqa $xa2,0x60(%rsp)
1281 vmovdqa $xa3,0x70(%rsp)
1283 vpshufd \$0x00,$xb3,$xb0
1284 vpshufd \$0x55,$xb3,$xb1
1285 vmovdqa $xb0,0x80-0x100(%rcx)
1286 vpshufd \$0xaa,$xb3,$xb2
1287 vmovdqa $xb1,0x90-0x100(%rcx)
1288 vpshufd \$0xff,$xb3,$xb3
1289 vmovdqa $xb2,0xa0-0x100(%rcx)
1290 vmovdqa $xb3,0xb0-0x100(%rcx)
1292 vpshufd \$0x00,$xt3,$xt0 # "$xc0"
1293 vpshufd \$0x55,$xt3,$xt1 # "$xc1"
1294 vmovdqa $xt0,0xc0-0x100(%rcx)
1295 vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
1296 vmovdqa $xt1,0xd0-0x100(%rcx)
1297 vpshufd \$0xff,$xt3,$xt3 # "$xc3"
1298 vmovdqa $xt2,0xe0-0x100(%rcx)
1299 vmovdqa $xt3,0xf0-0x100(%rcx)
1301 vpshufd \$0x00,$xd3,$xd0
1302 vpshufd \$0x55,$xd3,$xd1
1303 vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
1304 vpshufd \$0xaa,$xd3,$xd2
1305 vmovdqa $xd1,0x110-0x100(%rcx)
1306 vpshufd \$0xff,$xd3,$xd3
1307 vmovdqa $xd2,0x120-0x100(%rcx)
1308 vmovdqa $xd3,0x130-0x100(%rcx)
1314 vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
1315 vmovdqa 0x50(%rsp),$xa1
1316 vmovdqa 0x60(%rsp),$xa2
1317 vmovdqa 0x70(%rsp),$xa3
1318 vmovdqa 0x80-0x100(%rcx),$xb0
1319 vmovdqa 0x90-0x100(%rcx),$xb1
1320 vmovdqa 0xa0-0x100(%rcx),$xb2
1321 vmovdqa 0xb0-0x100(%rcx),$xb3
1322 vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
1323 vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
1324 vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
1325 vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
1326 vmovdqa 0x100-0x100(%rcx),$xd0
1327 vmovdqa 0x110-0x100(%rcx),$xd1
1328 vmovdqa 0x120-0x100(%rcx),$xd2
1329 vmovdqa 0x130-0x100(%rcx),$xd3
1330 vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
1334 vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
1340 foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
1341 foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
1346 vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
1347 vpaddd 0x50(%rsp),$xa1,$xa1
1348 vpaddd 0x60(%rsp),$xa2,$xa2
1349 vpaddd 0x70(%rsp),$xa3,$xa3
1351 vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
1352 vmovdqa $xt3,0x30(%rsp)
1354 vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
1355 vpunpckldq $xa3,$xa2,$xt3
1356 vpunpckhdq $xa1,$xa0,$xa0
1357 vpunpckhdq $xa3,$xa2,$xa2
1358 vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
1359 vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
1360 vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
1361 vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
1363 ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1365 vpaddd 0x80-0x100(%rcx),$xb0,$xb0
1366 vpaddd 0x90-0x100(%rcx),$xb1,$xb1
1367 vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
1368 vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
1370 vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
1371 vmovdqa $xa1,0x10(%rsp)
1372 vmovdqa 0x20(%rsp),$xa0 # "xc2"
1373 vmovdqa 0x30(%rsp),$xa1 # "xc3"
1375 vpunpckldq $xb1,$xb0,$xt2
1376 vpunpckldq $xb3,$xb2,$xt3
1377 vpunpckhdq $xb1,$xb0,$xb0
1378 vpunpckhdq $xb3,$xb2,$xb2
1379 vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
1380 vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
1381 vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
1382 vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
1384 ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1385 my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1387 vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
1388 vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
1389 vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
1390 vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
1392 vpunpckldq $xc1,$xc0,$xt2
1393 vpunpckldq $xc3,$xc2,$xt3
1394 vpunpckhdq $xc1,$xc0,$xc0
1395 vpunpckhdq $xc3,$xc2,$xc2
1396 vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
1397 vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
1398 vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
1399 vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
1401 ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1403 vpaddd 0x100-0x100(%rcx),$xd0,$xd0
1404 vpaddd 0x110-0x100(%rcx),$xd1,$xd1
1405 vpaddd 0x120-0x100(%rcx),$xd2,$xd2
1406 vpaddd 0x130-0x100(%rcx),$xd3,$xd3
1408 vpunpckldq $xd1,$xd0,$xt2
1409 vpunpckldq $xd3,$xd2,$xt3
1410 vpunpckhdq $xd1,$xd0,$xd0
1411 vpunpckhdq $xd3,$xd2,$xd2
1412 vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
1413 vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
1414 vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
1415 vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
1417 ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1418 ($xa0,$xa1)=($xt2,$xt3);
1420 vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
1421 vmovdqa 0x10(%rsp),$xa1
1426 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1427 vpxor 0x10($inp),$xb0,$xb0
1428 vpxor 0x20($inp),$xc0,$xc0
1429 vpxor 0x30($inp),$xd0,$xd0
1430 vpxor 0x40($inp),$xa1,$xa1
1431 vpxor 0x50($inp),$xb1,$xb1
1432 vpxor 0x60($inp),$xc1,$xc1
1433 vpxor 0x70($inp),$xd1,$xd1
1434 lea 0x80($inp),$inp # size optimization
1435 vpxor 0x00($inp),$xa2,$xa2
1436 vpxor 0x10($inp),$xb2,$xb2
1437 vpxor 0x20($inp),$xc2,$xc2
1438 vpxor 0x30($inp),$xd2,$xd2
1439 vpxor 0x40($inp),$xa3,$xa3
1440 vpxor 0x50($inp),$xb3,$xb3
1441 vpxor 0x60($inp),$xc3,$xc3
1442 vpxor 0x70($inp),$xd3,$xd3
1443 lea 0x80($inp),$inp # inp+=64*4
1445 vmovdqu $xa0,0x00($out)
1446 vmovdqu $xb0,0x10($out)
1447 vmovdqu $xc0,0x20($out)
1448 vmovdqu $xd0,0x30($out)
1449 vmovdqu $xa1,0x40($out)
1450 vmovdqu $xb1,0x50($out)
1451 vmovdqu $xc1,0x60($out)
1452 vmovdqu $xd1,0x70($out)
1453 lea 0x80($out),$out # size optimization
1454 vmovdqu $xa2,0x00($out)
1455 vmovdqu $xb2,0x10($out)
1456 vmovdqu $xc2,0x20($out)
1457 vmovdqu $xd2,0x30($out)
1458 vmovdqu $xa3,0x40($out)
1459 vmovdqu $xb3,0x50($out)
1460 vmovdqu $xc3,0x60($out)
1461 vmovdqu $xd3,0x70($out)
1462 lea 0x80($out),$out # out+=64*4
1472 jae .L192_or_more4xop
1474 jae .L128_or_more4xop
1476 jae .L64_or_more4xop
1479 vmovdqa $xa0,0x00(%rsp)
1480 vmovdqa $xb0,0x10(%rsp)
1481 vmovdqa $xc0,0x20(%rsp)
1482 vmovdqa $xd0,0x30(%rsp)
1487 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1488 vpxor 0x10($inp),$xb0,$xb0
1489 vpxor 0x20($inp),$xc0,$xc0
1490 vpxor 0x30($inp),$xd0,$xd0
1491 vmovdqu $xa0,0x00($out)
1492 vmovdqu $xb0,0x10($out)
1493 vmovdqu $xc0,0x20($out)
1494 vmovdqu $xd0,0x30($out)
1497 lea 0x40($inp),$inp # inp+=64*1
1498 vmovdqa $xa1,0x00(%rsp)
1500 vmovdqa $xb1,0x10(%rsp)
1501 lea 0x40($out),$out # out+=64*1
1502 vmovdqa $xc1,0x20(%rsp)
1503 sub \$64,$len # len-=64*1
1504 vmovdqa $xd1,0x30(%rsp)
1509 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1510 vpxor 0x10($inp),$xb0,$xb0
1511 vpxor 0x20($inp),$xc0,$xc0
1512 vpxor 0x30($inp),$xd0,$xd0
1513 vpxor 0x40($inp),$xa1,$xa1
1514 vpxor 0x50($inp),$xb1,$xb1
1515 vpxor 0x60($inp),$xc1,$xc1
1516 vpxor 0x70($inp),$xd1,$xd1
1518 vmovdqu $xa0,0x00($out)
1519 vmovdqu $xb0,0x10($out)
1520 vmovdqu $xc0,0x20($out)
1521 vmovdqu $xd0,0x30($out)
1522 vmovdqu $xa1,0x40($out)
1523 vmovdqu $xb1,0x50($out)
1524 vmovdqu $xc1,0x60($out)
1525 vmovdqu $xd1,0x70($out)
1528 lea 0x80($inp),$inp # inp+=64*2
1529 vmovdqa $xa2,0x00(%rsp)
1531 vmovdqa $xb2,0x10(%rsp)
1532 lea 0x80($out),$out # out+=64*2
1533 vmovdqa $xc2,0x20(%rsp)
1534 sub \$128,$len # len-=64*2
1535 vmovdqa $xd2,0x30(%rsp)
1540 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1541 vpxor 0x10($inp),$xb0,$xb0
1542 vpxor 0x20($inp),$xc0,$xc0
1543 vpxor 0x30($inp),$xd0,$xd0
1544 vpxor 0x40($inp),$xa1,$xa1
1545 vpxor 0x50($inp),$xb1,$xb1
1546 vpxor 0x60($inp),$xc1,$xc1
1547 vpxor 0x70($inp),$xd1,$xd1
1548 lea 0x80($inp),$inp # size optimization
1549 vpxor 0x00($inp),$xa2,$xa2
1550 vpxor 0x10($inp),$xb2,$xb2
1551 vpxor 0x20($inp),$xc2,$xc2
1552 vpxor 0x30($inp),$xd2,$xd2
1554 vmovdqu $xa0,0x00($out)
1555 vmovdqu $xb0,0x10($out)
1556 vmovdqu $xc0,0x20($out)
1557 vmovdqu $xd0,0x30($out)
1558 vmovdqu $xa1,0x40($out)
1559 vmovdqu $xb1,0x50($out)
1560 vmovdqu $xc1,0x60($out)
1561 vmovdqu $xd1,0x70($out)
1562 lea 0x80($out),$out # size optimization
1563 vmovdqu $xa2,0x00($out)
1564 vmovdqu $xb2,0x10($out)
1565 vmovdqu $xc2,0x20($out)
1566 vmovdqu $xd2,0x30($out)
1569 lea 0x40($inp),$inp # inp+=64*3
1570 vmovdqa $xa3,0x00(%rsp)
1572 vmovdqa $xb3,0x10(%rsp)
1573 lea 0x40($out),$out # out+=64*3
1574 vmovdqa $xc3,0x20(%rsp)
1575 sub \$192,$len # len-=64*3
1576 vmovdqa $xd3,0x30(%rsp)
1579 movzb ($inp,%r10),%eax
1580 movzb (%rsp,%r10),%ecx
1583 mov %al,-1($out,%r10)
1590 $code.=<<___ if ($win64);
1591 movaps -0xa8(%r9),%xmm6
1592 movaps -0x98(%r9),%xmm7
1593 movaps -0x88(%r9),%xmm8
1594 movaps -0x78(%r9),%xmm9
1595 movaps -0x68(%r9),%xmm10
1596 movaps -0x58(%r9),%xmm11
1597 movaps -0x48(%r9),%xmm12
1598 movaps -0x38(%r9),%xmm13
1599 movaps -0x28(%r9),%xmm14
1600 movaps -0x18(%r9),%xmm15
1606 .size ChaCha20_4xop,.-ChaCha20_4xop
1610 ########################################################################
1613 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1614 $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1615 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1616 "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
1618 sub AVX2_lane_ROUND {
1619 my ($a0,$b0,$c0,$d0)=@_;
1620 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1621 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1622 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1623 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1624 my @x=map("\"$_\"",@xx);
1626 # Consider order in which variables are addressed by their
1631 # 0 4 8 12 < even round
1635 # 0 5 10 15 < odd round
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while 'c's are maintained in memory. If
# you observe the 'c' column, you'll notice that a pair of 'c's
# stays invariant between rounds. This means that we have to
# reload them only once per round, in the middle. This is why
# you'll see a bunch of 'c' stores and loads in the middle, but
# none at the beginning or end.
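#
# Relative to the 4x SSSE3 path, each ymm register now carries one state
# word for eight independent blocks, and the .Lrot16/.Lrot24 shuffle
# constants are fetched with vbroadcasti128 from %r10/%r11 so that both
# 128-bit lanes are rotated identically.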
1649 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
1650 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1651 "&vpshufb (@x[$d0],@x[$d0],$t1)",
1652 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
1653 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1654 "&vpshufb (@x[$d1],@x[$d1],$t1)",
1656 "&vpaddd ($xc,$xc,@x[$d0])",
1657 "&vpxor (@x[$b0],$xc,@x[$b0])",
1658 "&vpslld ($t0,@x[$b0],12)",
1659 "&vpsrld (@x[$b0],@x[$b0],20)",
1660 "&vpor (@x[$b0],$t0,@x[$b0])",
1661 "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
1662 "&vpaddd ($xc_,$xc_,@x[$d1])",
1663 "&vpxor (@x[$b1],$xc_,@x[$b1])",
1664 "&vpslld ($t1,@x[$b1],12)",
1665 "&vpsrld (@x[$b1],@x[$b1],20)",
1666 "&vpor (@x[$b1],$t1,@x[$b1])",
1668 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
1669 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1670 "&vpshufb (@x[$d0],@x[$d0],$t0)",
1671 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
1672 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1673 "&vpshufb (@x[$d1],@x[$d1],$t0)",
1675 "&vpaddd ($xc,$xc,@x[$d0])",
1676 "&vpxor (@x[$b0],$xc,@x[$b0])",
1677 "&vpslld ($t1,@x[$b0],7)",
1678 "&vpsrld (@x[$b0],@x[$b0],25)",
1679 "&vpor (@x[$b0],$t1,@x[$b0])",
1680 "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
1681 "&vpaddd ($xc_,$xc_,@x[$d1])",
1682 "&vpxor (@x[$b1],$xc_,@x[$b1])",
1683 "&vpslld ($t0,@x[$b1],7)",
1684 "&vpsrld (@x[$b1],@x[$b1],25)",
1685 "&vpor (@x[$b1],$t0,@x[$b1])",
1687 "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
1688 "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
1689 "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
1690 "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
1692 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
1693 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1694 "&vpshufb (@x[$d2],@x[$d2],$t1)",
1695 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
1696 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1697 "&vpshufb (@x[$d3],@x[$d3],$t1)",
1699 "&vpaddd ($xc,$xc,@x[$d2])",
1700 "&vpxor (@x[$b2],$xc,@x[$b2])",
1701 "&vpslld ($t0,@x[$b2],12)",
1702 "&vpsrld (@x[$b2],@x[$b2],20)",
1703 "&vpor (@x[$b2],$t0,@x[$b2])",
1704 "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
1705 "&vpaddd ($xc_,$xc_,@x[$d3])",
1706 "&vpxor (@x[$b3],$xc_,@x[$b3])",
1707 "&vpslld ($t1,@x[$b3],12)",
1708 "&vpsrld (@x[$b3],@x[$b3],20)",
1709 "&vpor (@x[$b3],$t1,@x[$b3])",
1711 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
1712 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1713 "&vpshufb (@x[$d2],@x[$d2],$t0)",
1714 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
1715 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1716 "&vpshufb (@x[$d3],@x[$d3],$t0)",
1718 "&vpaddd ($xc,$xc,@x[$d2])",
1719 "&vpxor (@x[$b2],$xc,@x[$b2])",
1720 "&vpslld ($t1,@x[$b2],7)",
1721 "&vpsrld (@x[$b2],@x[$b2],25)",
1722 "&vpor (@x[$b2],$t1,@x[$b2])",
1723 "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
1724 "&vpaddd ($xc_,$xc_,@x[$d3])",
1725 "&vpxor (@x[$b3],$xc_,@x[$b3])",
1726 "&vpslld ($t0,@x[$b3],7)",
1727 "&vpsrld (@x[$b3],@x[$b3],25)",
1728 "&vpor (@x[$b3],$t0,@x[$b3])"
1732 my $xframe = $win64 ? 0xa8 : 8;
1735 .type ChaCha20_8x,\@function,5
1739 mov %rsp,%r9 # frame register
1740 sub \$0x280+$xframe,%rsp
1743 $code.=<<___ if ($win64);
1744 movaps %xmm6,-0xa8(%r9)
1745 movaps %xmm7,-0x98(%r9)
1746 movaps %xmm8,-0x88(%r9)
1747 movaps %xmm9,-0x78(%r9)
1748 movaps %xmm10,-0x68(%r9)
1749 movaps %xmm11,-0x58(%r9)
1750 movaps %xmm12,-0x48(%r9)
1751 movaps %xmm13,-0x38(%r9)
1752 movaps %xmm14,-0x28(%r9)
1753 movaps %xmm15,-0x18(%r9)
1759 ################ stack layout
1760 # +0x00 SIMD equivalent of @x[8-12]
1762 # +0x80 constant copy of key[0-2] smashed by lanes
1764 # +0x200 SIMD counters (with nonce smashed by lanes)
1768 vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
1769 vbroadcasti128 ($key),$xb3 # key[1]
1770 vbroadcasti128 16($key),$xt3 # key[2]
1771 vbroadcasti128 ($counter),$xd3 # key[3]
1772 lea 0x100(%rsp),%rcx # size optimization
1773 lea 0x200(%rsp),%rax # size optimization
1774 lea .Lrot16(%rip),%r10
1775 lea .Lrot24(%rip),%r11
1777 vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
1778 vpshufd \$0x55,$xa3,$xa1
1779 vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
1780 vpshufd \$0xaa,$xa3,$xa2
1781 vmovdqa $xa1,0xa0-0x100(%rcx)
1782 vpshufd \$0xff,$xa3,$xa3
1783 vmovdqa $xa2,0xc0-0x100(%rcx)
1784 vmovdqa $xa3,0xe0-0x100(%rcx)
1786 vpshufd \$0x00,$xb3,$xb0
1787 vpshufd \$0x55,$xb3,$xb1
1788 vmovdqa $xb0,0x100-0x100(%rcx)
1789 vpshufd \$0xaa,$xb3,$xb2
1790 vmovdqa $xb1,0x120-0x100(%rcx)
1791 vpshufd \$0xff,$xb3,$xb3
1792 vmovdqa $xb2,0x140-0x100(%rcx)
1793 vmovdqa $xb3,0x160-0x100(%rcx)
1795 vpshufd \$0x00,$xt3,$xt0 # "xc0"
1796 vpshufd \$0x55,$xt3,$xt1 # "xc1"
1797 vmovdqa $xt0,0x180-0x200(%rax)
1798 vpshufd \$0xaa,$xt3,$xt2 # "xc2"
1799 vmovdqa $xt1,0x1a0-0x200(%rax)
1800 vpshufd \$0xff,$xt3,$xt3 # "xc3"
1801 vmovdqa $xt2,0x1c0-0x200(%rax)
1802 vmovdqa $xt3,0x1e0-0x200(%rax)
1804 vpshufd \$0x00,$xd3,$xd0
1805 vpshufd \$0x55,$xd3,$xd1
1806 vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
1807 vpshufd \$0xaa,$xd3,$xd2
1808 vmovdqa $xd1,0x220-0x200(%rax)
1809 vpshufd \$0xff,$xd3,$xd3
1810 vmovdqa $xd2,0x240-0x200(%rax)
1811 vmovdqa $xd3,0x260-0x200(%rax)
1817 vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
1818 vmovdqa 0xa0-0x100(%rcx),$xa1
1819 vmovdqa 0xc0-0x100(%rcx),$xa2
1820 vmovdqa 0xe0-0x100(%rcx),$xa3
1821 vmovdqa 0x100-0x100(%rcx),$xb0
1822 vmovdqa 0x120-0x100(%rcx),$xb1
1823 vmovdqa 0x140-0x100(%rcx),$xb2
1824 vmovdqa 0x160-0x100(%rcx),$xb3
1825 vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
1826 vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
1827 vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
1828 vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
1829 vmovdqa 0x200-0x200(%rax),$xd0
1830 vmovdqa 0x220-0x200(%rax),$xd1
1831 vmovdqa 0x240-0x200(%rax),$xd2
1832 vmovdqa 0x260-0x200(%rax),$xd3
1833 vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
1836 vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
1837 vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
1838 vbroadcasti128 (%r10),$xt3
1839 vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
1846 foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1847 foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
1852 lea 0x200(%rsp),%rax # size optimization
1853 vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
1854 vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
1855 vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
1856 vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
1858 vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
1859 vpunpckldq $xa3,$xa2,$xt3
1860 vpunpckhdq $xa1,$xa0,$xa0
1861 vpunpckhdq $xa3,$xa2,$xa2
1862 vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
1863 vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
1864 vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
1865 vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
1867 ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1869 vpaddd 0x100-0x100(%rcx),$xb0,$xb0
1870 vpaddd 0x120-0x100(%rcx),$xb1,$xb1
1871 vpaddd 0x140-0x100(%rcx),$xb2,$xb2
1872 vpaddd 0x160-0x100(%rcx),$xb3,$xb3
1874 vpunpckldq $xb1,$xb0,$xt2
1875 vpunpckldq $xb3,$xb2,$xt3
1876 vpunpckhdq $xb1,$xb0,$xb0
1877 vpunpckhdq $xb3,$xb2,$xb2
1878 vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
1879 vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
1880 vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
1881 vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
1883 ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1885 vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
1886 vperm2i128 \$0x31,$xb0,$xa0,$xb0
1887 vperm2i128 \$0x20,$xb1,$xa1,$xa0
1888 vperm2i128 \$0x31,$xb1,$xa1,$xb1
1889 vperm2i128 \$0x20,$xb2,$xa2,$xa1
1890 vperm2i128 \$0x31,$xb2,$xa2,$xb2
1891 vperm2i128 \$0x20,$xb3,$xa3,$xa2
1892 vperm2i128 \$0x31,$xb3,$xa3,$xb3
1894 ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1895 my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1897 vmovdqa $xa0,0x00(%rsp) # offload $xaN
1898 vmovdqa $xa1,0x20(%rsp)
1899 vmovdqa 0x40(%rsp),$xc2 # $xa0
1900 vmovdqa 0x60(%rsp),$xc3 # $xa1
1902 vpaddd 0x180-0x200(%rax),$xc0,$xc0
1903 vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
1904 vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
1905 vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
1907 vpunpckldq $xc1,$xc0,$xt2
1908 vpunpckldq $xc3,$xc2,$xt3
1909 vpunpckhdq $xc1,$xc0,$xc0
1910 vpunpckhdq $xc3,$xc2,$xc2
1911 vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
1912 vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
1913 vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
1914 vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
1916 ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1918 vpaddd 0x200-0x200(%rax),$xd0,$xd0
1919 vpaddd 0x220-0x200(%rax),$xd1,$xd1
1920 vpaddd 0x240-0x200(%rax),$xd2,$xd2
1921 vpaddd 0x260-0x200(%rax),$xd3,$xd3
1923 vpunpckldq $xd1,$xd0,$xt2
1924 vpunpckldq $xd3,$xd2,$xt3
1925 vpunpckhdq $xd1,$xd0,$xd0
1926 vpunpckhdq $xd3,$xd2,$xd2
1927 vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
1928 vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
1929 vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
1930 vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
1932 ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1934 vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
1935 vperm2i128 \$0x31,$xd0,$xc0,$xd0
1936 vperm2i128 \$0x20,$xd1,$xc1,$xc0
1937 vperm2i128 \$0x31,$xd1,$xc1,$xd1
1938 vperm2i128 \$0x20,$xd2,$xc2,$xc1
1939 vperm2i128 \$0x31,$xd2,$xc2,$xd2
1940 vperm2i128 \$0x20,$xd3,$xc3,$xc2
1941 vperm2i128 \$0x31,$xd3,$xc3,$xd3
1943 ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1944 ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1945 ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1946 ($xa0,$xa1)=($xt2,$xt3);
1948 vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
1949 vmovdqa 0x20(%rsp),$xa1
1954 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1955 vpxor 0x20($inp),$xb0,$xb0
1956 vpxor 0x40($inp),$xc0,$xc0
1957 vpxor 0x60($inp),$xd0,$xd0
1958 lea 0x80($inp),$inp # size optimization
1959 vmovdqu $xa0,0x00($out)
1960 vmovdqu $xb0,0x20($out)
1961 vmovdqu $xc0,0x40($out)
1962 vmovdqu $xd0,0x60($out)
1963 lea 0x80($out),$out # size optimization
1965 vpxor 0x00($inp),$xa1,$xa1
1966 vpxor 0x20($inp),$xb1,$xb1
1967 vpxor 0x40($inp),$xc1,$xc1
1968 vpxor 0x60($inp),$xd1,$xd1
1969 lea 0x80($inp),$inp # size optimization
1970 vmovdqu $xa1,0x00($out)
1971 vmovdqu $xb1,0x20($out)
1972 vmovdqu $xc1,0x40($out)
1973 vmovdqu $xd1,0x60($out)
1974 lea 0x80($out),$out # size optimization
1976 vpxor 0x00($inp),$xa2,$xa2
1977 vpxor 0x20($inp),$xb2,$xb2
1978 vpxor 0x40($inp),$xc2,$xc2
1979 vpxor 0x60($inp),$xd2,$xd2
1980 lea 0x80($inp),$inp # size optimization
1981 vmovdqu $xa2,0x00($out)
1982 vmovdqu $xb2,0x20($out)
1983 vmovdqu $xc2,0x40($out)
1984 vmovdqu $xd2,0x60($out)
1985 lea 0x80($out),$out # size optimization
1987 vpxor 0x00($inp),$xa3,$xa3
1988 vpxor 0x20($inp),$xb3,$xb3
1989 vpxor 0x40($inp),$xc3,$xc3
1990 vpxor 0x60($inp),$xd3,$xd3
1991 lea 0x80($inp),$inp # size optimization
1992 vmovdqu $xa3,0x00($out)
1993 vmovdqu $xb3,0x20($out)
1994 vmovdqu $xc3,0x40($out)
1995 vmovdqu $xd3,0x60($out)
1996 lea 0x80($out),$out # size optimization
2020 vmovdqa $xa0,0x00(%rsp)
2021 vmovdqa $xb0,0x20(%rsp)
2026 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2027 vpxor 0x20($inp),$xb0,$xb0
2028 vmovdqu $xa0,0x00($out)
2029 vmovdqu $xb0,0x20($out)
2032 lea 0x40($inp),$inp # inp+=64*1
2034 vmovdqa $xc0,0x00(%rsp)
2035 lea 0x40($out),$out # out+=64*1
2036 sub \$64,$len # len-=64*1
2037 vmovdqa $xd0,0x20(%rsp)
2042 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2043 vpxor 0x20($inp),$xb0,$xb0
2044 vpxor 0x40($inp),$xc0,$xc0
2045 vpxor 0x60($inp),$xd0,$xd0
2046 vmovdqu $xa0,0x00($out)
2047 vmovdqu $xb0,0x20($out)
2048 vmovdqu $xc0,0x40($out)
2049 vmovdqu $xd0,0x60($out)
2052 lea 0x80($inp),$inp # inp+=64*2
2054 vmovdqa $xa1,0x00(%rsp)
2055 lea 0x80($out),$out # out+=64*2
2056 sub \$128,$len # len-=64*2
2057 vmovdqa $xb1,0x20(%rsp)
2062 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2063 vpxor 0x20($inp),$xb0,$xb0
2064 vpxor 0x40($inp),$xc0,$xc0
2065 vpxor 0x60($inp),$xd0,$xd0
2066 vpxor 0x80($inp),$xa1,$xa1
2067 vpxor 0xa0($inp),$xb1,$xb1
2068 vmovdqu $xa0,0x00($out)
2069 vmovdqu $xb0,0x20($out)
2070 vmovdqu $xc0,0x40($out)
2071 vmovdqu $xd0,0x60($out)
2072 vmovdqu $xa1,0x80($out)
2073 vmovdqu $xb1,0xa0($out)
2076 lea 0xc0($inp),$inp # inp+=64*3
2078 vmovdqa $xc1,0x00(%rsp)
2079 lea 0xc0($out),$out # out+=64*3
2080 sub \$192,$len # len-=64*3
2081 vmovdqa $xd1,0x20(%rsp)
2086 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2087 vpxor 0x20($inp),$xb0,$xb0
2088 vpxor 0x40($inp),$xc0,$xc0
2089 vpxor 0x60($inp),$xd0,$xd0
2090 vpxor 0x80($inp),$xa1,$xa1
2091 vpxor 0xa0($inp),$xb1,$xb1
2092 vpxor 0xc0($inp),$xc1,$xc1
2093 vpxor 0xe0($inp),$xd1,$xd1
2094 vmovdqu $xa0,0x00($out)
2095 vmovdqu $xb0,0x20($out)
2096 vmovdqu $xc0,0x40($out)
2097 vmovdqu $xd0,0x60($out)
2098 vmovdqu $xa1,0x80($out)
2099 vmovdqu $xb1,0xa0($out)
2100 vmovdqu $xc1,0xc0($out)
2101 vmovdqu $xd1,0xe0($out)
2104 lea 0x100($inp),$inp # inp+=64*4
2106 vmovdqa $xa2,0x00(%rsp)
2107 lea 0x100($out),$out # out+=64*4
2108 sub \$256,$len # len-=64*4
2109 vmovdqa $xb2,0x20(%rsp)
2114 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2115 vpxor 0x20($inp),$xb0,$xb0
2116 vpxor 0x40($inp),$xc0,$xc0
2117 vpxor 0x60($inp),$xd0,$xd0
2118 vpxor 0x80($inp),$xa1,$xa1
2119 vpxor 0xa0($inp),$xb1,$xb1
2120 vpxor 0xc0($inp),$xc1,$xc1
2121 vpxor 0xe0($inp),$xd1,$xd1
2122 vpxor 0x100($inp),$xa2,$xa2
2123 vpxor 0x120($inp),$xb2,$xb2
2124 vmovdqu $xa0,0x00($out)
2125 vmovdqu $xb0,0x20($out)
2126 vmovdqu $xc0,0x40($out)
2127 vmovdqu $xd0,0x60($out)
2128 vmovdqu $xa1,0x80($out)
2129 vmovdqu $xb1,0xa0($out)
2130 vmovdqu $xc1,0xc0($out)
2131 vmovdqu $xd1,0xe0($out)
2132 vmovdqu $xa2,0x100($out)
2133 vmovdqu $xb2,0x120($out)
2136 lea 0x140($inp),$inp # inp+=64*5
2138 vmovdqa $xc2,0x00(%rsp)
2139 lea 0x140($out),$out # out+=64*5
2140 sub \$320,$len # len-=64*5
2141 vmovdqa $xd2,0x20(%rsp)
2146 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2147 vpxor 0x20($inp),$xb0,$xb0
2148 vpxor 0x40($inp),$xc0,$xc0
2149 vpxor 0x60($inp),$xd0,$xd0
2150 vpxor 0x80($inp),$xa1,$xa1
2151 vpxor 0xa0($inp),$xb1,$xb1
2152 vpxor 0xc0($inp),$xc1,$xc1
2153 vpxor 0xe0($inp),$xd1,$xd1
2154 vpxor 0x100($inp),$xa2,$xa2
2155 vpxor 0x120($inp),$xb2,$xb2
2156 vpxor 0x140($inp),$xc2,$xc2
2157 vpxor 0x160($inp),$xd2,$xd2
2158 vmovdqu $xa0,0x00($out)
2159 vmovdqu $xb0,0x20($out)
2160 vmovdqu $xc0,0x40($out)
2161 vmovdqu $xd0,0x60($out)
2162 vmovdqu $xa1,0x80($out)
2163 vmovdqu $xb1,0xa0($out)
2164 vmovdqu $xc1,0xc0($out)
2165 vmovdqu $xd1,0xe0($out)
2166 vmovdqu $xa2,0x100($out)
2167 vmovdqu $xb2,0x120($out)
2168 vmovdqu $xc2,0x140($out)
2169 vmovdqu $xd2,0x160($out)
2172 lea 0x180($inp),$inp # inp+=64*6
2174 vmovdqa $xa3,0x00(%rsp)
2175 lea 0x180($out),$out # out+=64*6
2176 sub \$384,$len # len-=64*6
2177 vmovdqa $xb3,0x20(%rsp)
2182 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2183 vpxor 0x20($inp),$xb0,$xb0
2184 vpxor 0x40($inp),$xc0,$xc0
2185 vpxor 0x60($inp),$xd0,$xd0
2186 vpxor 0x80($inp),$xa1,$xa1
2187 vpxor 0xa0($inp),$xb1,$xb1
2188 vpxor 0xc0($inp),$xc1,$xc1
2189 vpxor 0xe0($inp),$xd1,$xd1
2190 vpxor 0x100($inp),$xa2,$xa2
2191 vpxor 0x120($inp),$xb2,$xb2
2192 vpxor 0x140($inp),$xc2,$xc2
2193 vpxor 0x160($inp),$xd2,$xd2
2194 vpxor 0x180($inp),$xa3,$xa3
2195 vpxor 0x1a0($inp),$xb3,$xb3
2196 vmovdqu $xa0,0x00($out)
2197 vmovdqu $xb0,0x20($out)
2198 vmovdqu $xc0,0x40($out)
2199 vmovdqu $xd0,0x60($out)
2200 vmovdqu $xa1,0x80($out)
2201 vmovdqu $xb1,0xa0($out)
2202 vmovdqu $xc1,0xc0($out)
2203 vmovdqu $xd1,0xe0($out)
2204 vmovdqu $xa2,0x100($out)
2205 vmovdqu $xb2,0x120($out)
2206 vmovdqu $xc2,0x140($out)
2207 vmovdqu $xd2,0x160($out)
2208 vmovdqu $xa3,0x180($out)
2209 vmovdqu $xb3,0x1a0($out)
2212 lea 0x1c0($inp),$inp # inp+=64*7
2214 vmovdqa $xc3,0x00(%rsp)
2215 lea 0x1c0($out),$out # out+=64*7
2216 sub \$448,$len # len-=64*7
2217 vmovdqa $xd3,0x20(%rsp)
2220 movzb ($inp,%r10),%eax
2221 movzb (%rsp,%r10),%ecx
2224 mov %al,-1($out,%r10)
2231 $code.=<<___ if ($win64);
2232 movaps -0xa8(%r9),%xmm6
2233 movaps -0x98(%r9),%xmm7
2234 movaps -0x88(%r9),%xmm8
2235 movaps -0x78(%r9),%xmm9
2236 movaps -0x68(%r9),%xmm10
2237 movaps -0x58(%r9),%xmm11
2238 movaps -0x48(%r9),%xmm12
2239 movaps -0x38(%r9),%xmm13
2240 movaps -0x28(%r9),%xmm14
2241 movaps -0x18(%r9),%xmm15
2247 .size ChaCha20_8x,.-ChaCha20_8x
2251 ########################################################################
2254 # This one handles shorter inputs...
2256 my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
2257 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
2259 sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round
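# AVX512F provides a true vector rotate, vprold, so each rotation costs
# a single instruction; that is what shortens the critical path relative
# to the 20 "SIMD ticks" of the SSSE3 round above.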
2277 my $xframe = $win64 ? 32+8 : 8;
2280 .type ChaCha20_avx512,\@function,5
2284 mov %rsp,%r9 # frame pointer
2288 sub \$64+$xframe,%rsp
2290 $code.=<<___ if ($win64);
2291 movaps %xmm6,-0x28(%r9)
2292 movaps %xmm7,-0x18(%r9)
2296 vbroadcasti32x4 .Lsigma(%rip),$a
2297 vbroadcasti32x4 ($key),$b
2298 vbroadcasti32x4 16($key),$c
2299 vbroadcasti32x4 ($counter),$d
2304 vpaddd .Lzeroz(%rip),$d,$d
2305 vmovdqa32 .Lfourz(%rip),$fourz
2306 mov \$10,$counter # reuse $counter
2315 vpaddd $fourz,$d_,$d
2324 &vpshufd ($c,$c,0b01001110);
2325 &vpshufd ($b,$b,0b00111001);
2326 &vpshufd ($d,$d,0b10010011);
2329 &vpshufd ($c,$c,0b01001110);
2330 &vpshufd ($b,$b,0b10010011);
2331 &vpshufd ($d,$d,0b00111001);
2334 &jnz (".Loop_avx512");
2345 vpxor 0x00($inp),%x#$a,$t0 # xor with input
2346 vpxor 0x10($inp),%x#$b,$t1
2347 vpxor 0x20($inp),%x#$c,$t2
2348 vpxor 0x30($inp),%x#$d,$t3
2349 lea 0x40($inp),$inp # inp+=64
2351 vmovdqu $t0,0x00($out) # write output
2352 vmovdqu $t1,0x10($out)
2353 vmovdqu $t2,0x20($out)
2354 vmovdqu $t3,0x30($out)
2355 lea 0x40($out),$out # out+=64
2359 vextracti32x4 \$1,$a,$t0
2360 vextracti32x4 \$1,$b,$t1
2361 vextracti32x4 \$1,$c,$t2
2362 vextracti32x4 \$1,$d,$t3
2367 vpxor 0x00($inp),$t0,$t0 # xor with input
2368 vpxor 0x10($inp),$t1,$t1
2369 vpxor 0x20($inp),$t2,$t2
2370 vpxor 0x30($inp),$t3,$t3
2371 lea 0x40($inp),$inp # inp+=64
2373 vmovdqu $t0,0x00($out) # write output
2374 vmovdqu $t1,0x10($out)
2375 vmovdqu $t2,0x20($out)
2376 vmovdqu $t3,0x30($out)
2377 lea 0x40($out),$out # out+=64
2381 vextracti32x4 \$2,$a,$t0
2382 vextracti32x4 \$2,$b,$t1
2383 vextracti32x4 \$2,$c,$t2
2384 vextracti32x4 \$2,$d,$t3
2389 vpxor 0x00($inp),$t0,$t0 # xor with input
2390 vpxor 0x10($inp),$t1,$t1
2391 vpxor 0x20($inp),$t2,$t2
2392 vpxor 0x30($inp),$t3,$t3
2393 lea 0x40($inp),$inp # inp+=64
2395 vmovdqu $t0,0x00($out) # write output
2396 vmovdqu $t1,0x10($out)
2397 vmovdqu $t2,0x20($out)
2398 vmovdqu $t3,0x30($out)
2399 lea 0x40($out),$out # out+=64
2403 vextracti32x4 \$3,$a,$t0
2404 vextracti32x4 \$3,$b,$t1
2405 vextracti32x4 \$3,$c,$t2
2406 vextracti32x4 \$3,$d,$t3
2411 vpxor 0x00($inp),$t0,$t0 # xor with input
2412 vpxor 0x10($inp),$t1,$t1
2413 vpxor 0x20($inp),$t2,$t2
2414 vpxor 0x30($inp),$t3,$t3
2415 lea 0x40($inp),$inp # inp+=64
2417 vmovdqu $t0,0x00($out) # write output
2418 vmovdqu $t1,0x10($out)
2419 vmovdqu $t2,0x20($out)
2420 vmovdqu $t3,0x30($out)
2421 lea 0x40($out),$out # out+=64
2423 jnz .Loop_outer_avx512
2429 vmovdqa %x#$a,0x00(%rsp)
2430 vmovdqa %x#$b,0x10(%rsp)
2431 vmovdqa %x#$c,0x20(%rsp)
2432 vmovdqa %x#$d,0x30(%rsp)
2434 jmp .Loop_tail_avx512
2438 vmovdqa $t0,0x00(%rsp)
2439 vmovdqa $t1,0x10(%rsp)
2440 vmovdqa $t2,0x20(%rsp)
2441 vmovdqa $t3,0x30(%rsp)
2445 movzb ($inp,$counter),%eax
2446 movzb (%rsp,$counter),%ecx
2447 lea 1($counter),$counter
2449 mov %al,-1($out,$counter)
2451 jnz .Loop_tail_avx512
2453 vmovdqa32 $a_,0x00(%rsp)
2458 $code.=<<___ if ($win64);
2459 movaps -0x28(%r9),%xmm6
2460 movaps -0x18(%r9),%xmm7
2466 .size ChaCha20_avx512,.-ChaCha20_avx512
2470 # This one handles longer inputs...
2472 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2473 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2474 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2475 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2476 my @key=map("%zmm$_",(16..31));
2477 my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
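# With 32 zmm registers available, most of the lane-smashed key material
# is kept in @key (%zmm16..%zmm31) instead of being spilled to the stack
# as in the 4x/8x paths above; @key[0..3] double as temporaries and are
# re-broadcast from .Lsigma (via %r10) on every outer iteration, while
# @key[12..15] hold the counter/nonce row, with @key[12] advanced by
# .Lsixteen for each new batch of 16 blocks.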
2479 sub AVX512_lane_ROUND {
2480 my ($a0,$b0,$c0,$d0)=@_;
2481 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
2482 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
2483 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
2484 my @x=map("\"$_\"",@xx);
2487 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
2488 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
2489 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
2490 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
2491 "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
2492 "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
2493 "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
2494 "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
2495 "&vprold (@x[$d0],@x[$d0],16)",
2496 "&vprold (@x[$d1],@x[$d1],16)",
2497 "&vprold (@x[$d2],@x[$d2],16)",
2498 "&vprold (@x[$d3],@x[$d3],16)",
2500 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
2501 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
2502 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
2503 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
2504 "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
2505 "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
2506 "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
2507 "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
2508 "&vprold (@x[$b0],@x[$b0],12)",
2509 "&vprold (@x[$b1],@x[$b1],12)",
2510 "&vprold (@x[$b2],@x[$b2],12)",
2511 "&vprold (@x[$b3],@x[$b3],12)",
2513 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
2514 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
2515 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
2516 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
2517 "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
2518 "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
2519 "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
2520 "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
2521 "&vprold (@x[$d0],@x[$d0],8)",
2522 "&vprold (@x[$d1],@x[$d1],8)",
2523 "&vprold (@x[$d2],@x[$d2],8)",
2524 "&vprold (@x[$d3],@x[$d3],8)",
2526 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
2527 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
2528 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
2529 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
2530 "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
2531 "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
2532 "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
2533 "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
2534 "&vprold (@x[$b0],@x[$b0],7)",
2535 "&vprold (@x[$b1],@x[$b1],7)",
2536 "&vprold (@x[$b2],@x[$b2],7)",
2537 "&vprold (@x[$b3],@x[$b3],7)"
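	# Note: vpxord is the EVEX form of vpxor (required for %zmm registers),
	# and vprold rotates each 32-bit lane in place, so the 16/12/8/7-bit
	# rotates need neither shift+or sequences nor byte-shuffle constants.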
2541 my $xframe = $win64 ? 0xa8 : 8;
2544 .type ChaCha20_16x,\@function,5
2548 mov %rsp,%r9 # frame register
2549 sub \$64+$xframe,%rsp
2552 $code.=<<___ if ($win64);
2553 movaps %xmm6,-0xa8(%r9)
2554 movaps %xmm7,-0x98(%r9)
2555 movaps %xmm8,-0x88(%r9)
2556 movaps %xmm9,-0x78(%r9)
2557 movaps %xmm10,-0x68(%r9)
2558 movaps %xmm11,-0x58(%r9)
2559 movaps %xmm12,-0x48(%r9)
2560 movaps %xmm13,-0x38(%r9)
2561 movaps %xmm14,-0x28(%r9)
2562 movaps %xmm15,-0x18(%r9)
2568 lea .Lsigma(%rip),%r10
2569 vbroadcasti32x4 (%r10),$xa3 # key[0]
2570 vbroadcasti32x4 ($key),$xb3 # key[1]
2571 vbroadcasti32x4 16($key),$xc3 # key[2]
2572 vbroadcasti32x4 ($counter),$xd3 # key[3]
2574 vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
2575 vpshufd \$0x55,$xa3,$xa1
2576 vpshufd \$0xaa,$xa3,$xa2
2577 vpshufd \$0xff,$xa3,$xa3
2578 vmovdqa64 $xa0,@key[0]
2579 vmovdqa64 $xa1,@key[1]
2580 vmovdqa64 $xa2,@key[2]
2581 vmovdqa64 $xa3,@key[3]
2583 vpshufd \$0x00,$xb3,$xb0
2584 vpshufd \$0x55,$xb3,$xb1
2585 vpshufd \$0xaa,$xb3,$xb2
2586 vpshufd \$0xff,$xb3,$xb3
2587 vmovdqa64 $xb0,@key[4]
2588 vmovdqa64 $xb1,@key[5]
2589 vmovdqa64 $xb2,@key[6]
2590 vmovdqa64 $xb3,@key[7]
2592 vpshufd \$0x00,$xc3,$xc0
2593 vpshufd \$0x55,$xc3,$xc1
2594 vpshufd \$0xaa,$xc3,$xc2
2595 vpshufd \$0xff,$xc3,$xc3
2596 vmovdqa64 $xc0,@key[8]
2597 vmovdqa64 $xc1,@key[9]
2598 vmovdqa64 $xc2,@key[10]
2599 vmovdqa64 $xc3,@key[11]
2601 vpshufd \$0x00,$xd3,$xd0
2602 vpshufd \$0x55,$xd3,$xd1
2603 vpshufd \$0xaa,$xd3,$xd2
2604 vpshufd \$0xff,$xd3,$xd3
2605 vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet
2606 vmovdqa64 $xd0,@key[12]
2607 vmovdqa64 $xd1,@key[13]
2608 vmovdqa64 $xd2,@key[14]
2609 vmovdqa64 $xd3,@key[15]
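	# At this point @key[0..3] hold the four "expand 32-byte k" constants,
	# @key[4..11] the eight key words and @key[13..15] the nonce words,
	# each replicated across all 16 lanes of a %zmm register; @key[12]
	# holds 16 consecutive block counters (lane i encrypts block counter+i)
	# thanks to the .Lincz addition above.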
2616	vpbroadcastd	0(%r10),$xa0		# reload key[0], the sigma constants
2617 vpbroadcastd 4(%r10),$xa1
2618 vpbroadcastd 8(%r10),$xa2
2619 vpbroadcastd 12(%r10),$xa3
2620 vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters
2621 vmovdqa64 @key[4],$xb0
2622 vmovdqa64 @key[5],$xb1
2623 vmovdqa64 @key[6],$xb2
2624 vmovdqa64 @key[7],$xb3
2625 vmovdqa64 @key[8],$xc0
2626 vmovdqa64 @key[9],$xc1
2627 vmovdqa64 @key[10],$xc2
2628 vmovdqa64 @key[11],$xc3
2629 vmovdqa64 @key[12],$xd0
2630 vmovdqa64 @key[13],$xd1
2631 vmovdqa64 @key[14],$xd2
2632 vmovdqa64 @key[15],$xd3
2634 vmovdqa64 $xa0,@key[0]
2635 vmovdqa64 $xa1,@key[1]
2636 vmovdqa64 $xa2,@key[2]
2637 vmovdqa64 $xa3,@key[3]
2645 foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
2646 foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
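
# Each pass through the two calls above emits one column round and one
# diagonal round over all 16 lanes; repeated ten times this gives the
# 20 rounds of ChaCha20.  For illustration, a sketch of one double round
# on a 16-word scalar state, built on the hypothetical quarter_round_ref
# helper above (not used by this module):
sub double_round_ref {
	my @s = @_;					# 16 x 32-bit state words
	foreach my $q ([0,4,8,12],[1,5,9,13],[2,6,10,14],[3,7,11,15],	# columns
		       [0,5,10,15],[1,6,11,12],[2,7,8,13],[3,4,9,14]) {	# diagonals
		@s[@$q] = quarter_round_ref(@s[@$q]);
	}
	return @s;
}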
2651 vpaddd @key[0],$xa0,$xa0 # accumulate key
2652 vpaddd @key[1],$xa1,$xa1
2653 vpaddd @key[2],$xa2,$xa2
2654 vpaddd @key[3],$xa3,$xa3
2656 vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
2657 vpunpckldq $xa3,$xa2,$xt3
2658 vpunpckhdq $xa1,$xa0,$xa0
2659 vpunpckhdq $xa3,$xa2,$xa2
2660 vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
2661 vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
2662 vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
2663 vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
2665 ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
2667 vpaddd @key[4],$xb0,$xb0
2668 vpaddd @key[5],$xb1,$xb1
2669 vpaddd @key[6],$xb2,$xb2
2670 vpaddd @key[7],$xb3,$xb3
2672 vpunpckldq $xb1,$xb0,$xt2
2673 vpunpckldq $xb3,$xb2,$xt3
2674 vpunpckhdq $xb1,$xb0,$xb0
2675 vpunpckhdq $xb3,$xb2,$xb2
2676 vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
2677 vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
2678 vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
2679 vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
2681 ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
2683 vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further
2684 vshufi32x4 \$0xee,$xb0,$xa0,$xb0
2685 vshufi32x4 \$0x44,$xb1,$xa1,$xa0
2686 vshufi32x4 \$0xee,$xb1,$xa1,$xb1
2687 vshufi32x4 \$0x44,$xb2,$xa2,$xa1
2688 vshufi32x4 \$0xee,$xb2,$xa2,$xb2
2689 vshufi32x4 \$0x44,$xb3,$xa3,$xa2
2690 vshufi32x4 \$0xee,$xb3,$xa3,$xb3
2692 ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
2694 vpaddd @key[8],$xc0,$xc0
2695 vpaddd @key[9],$xc1,$xc1
2696 vpaddd @key[10],$xc2,$xc2
2697 vpaddd @key[11],$xc3,$xc3
2699 vpunpckldq $xc1,$xc0,$xt2
2700 vpunpckldq $xc3,$xc2,$xt3
2701 vpunpckhdq $xc1,$xc0,$xc0
2702 vpunpckhdq $xc3,$xc2,$xc2
2703 vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
2704 vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
2705 vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
2706 vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
2708 ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
2710 vpaddd @key[12],$xd0,$xd0
2711 vpaddd @key[13],$xd1,$xd1
2712 vpaddd @key[14],$xd2,$xd2
2713 vpaddd @key[15],$xd3,$xd3
2715 vpunpckldq $xd1,$xd0,$xt2
2716 vpunpckldq $xd3,$xd2,$xt3
2717 vpunpckhdq $xd1,$xd0,$xd0
2718 vpunpckhdq $xd3,$xd2,$xd2
2719 vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
2720 vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
2721 vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
2722 vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
2724 ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
2726 vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # "de-interlace" further
2727 vshufi32x4 \$0xee,$xd0,$xc0,$xd0
2728 vshufi32x4 \$0x44,$xd1,$xc1,$xc0
2729 vshufi32x4 \$0xee,$xd1,$xc1,$xd1
2730 vshufi32x4 \$0x44,$xd2,$xc2,$xc1
2731 vshufi32x4 \$0xee,$xd2,$xc2,$xd2
2732 vshufi32x4 \$0x44,$xd3,$xc3,$xc2
2733 vshufi32x4 \$0xee,$xd3,$xc3,$xd3
2735 ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
2737 vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further
2738 vshufi32x4 \$0xdd,$xc0,$xa0,$xa0
2739 vshufi32x4 \$0x88,$xd0,$xb0,$xc0
2740 vshufi32x4 \$0xdd,$xd0,$xb0,$xd0
2741 vshufi32x4 \$0x88,$xc1,$xa1,$xt1
2742 vshufi32x4 \$0xdd,$xc1,$xa1,$xa1
2743 vshufi32x4 \$0x88,$xd1,$xb1,$xc1
2744 vshufi32x4 \$0xdd,$xd1,$xb1,$xd1
2745 vshufi32x4 \$0x88,$xc2,$xa2,$xt2
2746 vshufi32x4 \$0xdd,$xc2,$xa2,$xa2
2747 vshufi32x4 \$0x88,$xd2,$xb2,$xc2
2748 vshufi32x4 \$0xdd,$xd2,$xb2,$xd2
2749 vshufi32x4 \$0x88,$xc3,$xa3,$xt3
2750 vshufi32x4 \$0xdd,$xc3,$xa3,$xa3
2751 vshufi32x4 \$0x88,$xd3,$xb3,$xc3
2752 vshufi32x4 \$0xdd,$xd3,$xb3,$xd3
2754 ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
2755 ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
2757 ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
2758 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
2759 ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2760 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
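
# After the dword/qword unpacks and 128-bit shuffles above, each %zmm
# register holds one contiguous 64-byte keystream block; the renaming
# arranges them in output order $xa0,$xb0,$xc0,$xd0,$xa1,... to match the
# 0x00,0x40,0x80,... load/store offsets below.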
2765 vpxord 0x00($inp),$xa0,$xa0 # xor with input
2766 vpxord 0x40($inp),$xb0,$xb0
2767 vpxord 0x80($inp),$xc0,$xc0
2768 vpxord 0xc0($inp),$xd0,$xd0
2769 vmovdqu32 $xa0,0x00($out)
2770 vmovdqu32 $xb0,0x40($out)
2771 vmovdqu32 $xc0,0x80($out)
2772 vmovdqu32 $xd0,0xc0($out)
2774 vpxord 0x100($inp),$xa1,$xa1
2775 vpxord 0x140($inp),$xb1,$xb1
2776 vpxord 0x180($inp),$xc1,$xc1
2777 vpxord 0x1c0($inp),$xd1,$xd1
2778 vmovdqu32 $xa1,0x100($out)
2779 vmovdqu32 $xb1,0x140($out)
2780 vmovdqu32 $xc1,0x180($out)
2781 vmovdqu32 $xd1,0x1c0($out)
2783 vpxord 0x200($inp),$xa2,$xa2
2784 vpxord 0x240($inp),$xb2,$xb2
2785 vpxord 0x280($inp),$xc2,$xc2
2786 vpxord 0x2c0($inp),$xd2,$xd2
2787 vmovdqu32 $xa2,0x200($out)
2788 vmovdqu32 $xb2,0x240($out)
2789 vmovdqu32 $xc2,0x280($out)
2790 vmovdqu32 $xd2,0x2c0($out)
2792 vpxord 0x300($inp),$xa3,$xa3
2793 vpxord 0x340($inp),$xb3,$xb3
2794 vpxord 0x380($inp),$xc3,$xc3
2795 vpxord 0x3c0($inp),$xd3,$xd3
2796 lea 0x400($inp),$inp
2797 vmovdqu32 $xa3,0x300($out)
2798 vmovdqu32 $xb3,0x340($out)
2799 vmovdqu32 $xc3,0x380($out)
2800 vmovdqu32 $xd3,0x3c0($out)
2801 lea 0x400($out),$out
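	# 0x400 bytes (16 blocks) are consumed per iteration; for a shorter
	# remainder the tail code below drains one 64-byte register at a time,
	# branching to .Less_than_64_16x for the final partial block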
2813 jb .Less_than_64_16x
2814 vpxord ($inp),$xa0,$xa0 # xor with input
2815 vmovdqu32 $xa0,($out,$inp)
2821 jb .Less_than_64_16x
2822 vpxord ($inp),$xb0,$xb0
2823 vmovdqu32 $xb0,($out,$inp)
2829 jb .Less_than_64_16x
2830 vpxord ($inp),$xc0,$xc0
2831 vmovdqu32 $xc0,($out,$inp)
2837 jb .Less_than_64_16x
2838 vpxord ($inp),$xd0,$xd0
2839 vmovdqu32 $xd0,($out,$inp)
2845 jb .Less_than_64_16x
2846 vpxord ($inp),$xa1,$xa1
2847 vmovdqu32 $xa1,($out,$inp)
2853 jb .Less_than_64_16x
2854 vpxord ($inp),$xb1,$xb1
2855 vmovdqu32 $xb1,($out,$inp)
2861 jb .Less_than_64_16x
2862 vpxord ($inp),$xc1,$xc1
2863 vmovdqu32 $xc1,($out,$inp)
2869 jb .Less_than_64_16x
2870 vpxord ($inp),$xd1,$xd1
2871 vmovdqu32 $xd1,($out,$inp)
2877 jb .Less_than_64_16x
2878 vpxord ($inp),$xa2,$xa2
2879 vmovdqu32 $xa2,($out,$inp)
2885 jb .Less_than_64_16x
2886 vpxord ($inp),$xb2,$xb2
2887 vmovdqu32 $xb2,($out,$inp)
2893 jb .Less_than_64_16x
2894 vpxord ($inp),$xc2,$xc2
2895 vmovdqu32 $xc2,($out,$inp)
2901 jb .Less_than_64_16x
2902 vpxord ($inp),$xd2,$xd2
2903 vmovdqu32 $xd2,($out,$inp)
2909 jb .Less_than_64_16x
2910 vpxord ($inp),$xa3,$xa3
2911 vmovdqu32 $xa3,($out,$inp)
2917 jb .Less_than_64_16x
2918 vpxord ($inp),$xb3,$xb3
2919 vmovdqu32 $xb3,($out,$inp)
2925 jb .Less_than_64_16x
2926 vpxord ($inp),$xc3,$xc3
2927 vmovdqu32 $xc3,($out,$inp)
2933 vmovdqa32 $xa0,0x00(%rsp)
2934 lea ($out,$inp),$out
2938 movzb ($inp,%r10),%eax
2939 movzb (%rsp,%r10),%ecx
2942 mov %al,-1($out,%r10)
2946	vpxord		$xa0,$xa0,$xa0		# zero a register...
2947	vmovdqa32	$xa0,0(%rsp)		# ...and wipe the keystream block parked on the stack
2952 $code.=<<___ if ($win64);
2953 movaps -0xa8(%r9),%xmm6
2954 movaps -0x98(%r9),%xmm7
2955 movaps -0x88(%r9),%xmm8
2956 movaps -0x78(%r9),%xmm9
2957 movaps -0x68(%r9),%xmm10
2958 movaps -0x58(%r9),%xmm11
2959 movaps -0x48(%r9),%xmm12
2960 movaps -0x38(%r9),%xmm13
2961 movaps -0x28(%r9),%xmm14
2962 movaps -0x18(%r9),%xmm15
2968 .size ChaCha20_16x,.-ChaCha20_16x
2972 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2973 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
2981 .extern __imp_RtlVirtualUnwind
2982 .type se_handler,\@abi-omnipotent
2996 mov 120($context),%rax # pull context->Rax
2997 mov 248($context),%rbx # pull context->Rip
2999 mov 8($disp),%rsi # disp->ImageBase
3000 mov 56($disp),%r11 # disp->HandlerData
3002 lea .Lctr32_body(%rip),%r10
3003	cmp	%r10,%rbx		# context->Rip<.Lctr32_body
3004 jb .Lcommon_seh_tail
3006 mov 152($context),%rax # pull context->Rsp
3008 lea .Lno_data(%rip),%r10 # epilogue label
3009	cmp	%r10,%rbx		# context->Rip>=.Lno_data
3010 jae .Lcommon_seh_tail
3012 lea 64+24+48(%rax),%rax
3020 mov %rbx,144($context) # restore context->Rbx
3021 mov %rbp,160($context) # restore context->Rbp
3022 mov %r12,216($context) # restore context->R12
3023 mov %r13,224($context) # restore context->R13
3024 mov %r14,232($context) # restore context->R14
3025	mov	%r15,240($context)	# restore context->R15
3030 mov %rax,152($context) # restore context->Rsp
3031 mov %rsi,168($context) # restore context->Rsi
3032 mov %rdi,176($context) # restore context->Rdi
3034 mov 40($disp),%rdi # disp->ContextRecord
3035 mov $context,%rsi # context
3036	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords, for rep movsq
3037 .long 0xa548f3fc # cld; rep movsq
3040 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3041 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3042 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3043 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3044 mov 40(%rsi),%r10 # disp->ContextRecord
3045 lea 56(%rsi),%r11 # &disp->HandlerData
3046 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3047 mov %r10,32(%rsp) # arg5
3048 mov %r11,40(%rsp) # arg6
3049 mov %r12,48(%rsp) # arg7
3050 mov %rcx,56(%rsp) # arg8, (NULL)
3051 call *__imp_RtlVirtualUnwind(%rip)
3053 mov \$1,%eax # ExceptionContinueSearch
3065 .size se_handler,.-se_handler
3067 .type ssse3_handler,\@abi-omnipotent
3081 mov 120($context),%rax # pull context->Rax
3082 mov 248($context),%rbx # pull context->Rip
3084 mov 8($disp),%rsi # disp->ImageBase
3085 mov 56($disp),%r11 # disp->HandlerData
3087 mov 0(%r11),%r10d # HandlerData[0]
3088 lea (%rsi,%r10),%r10 # prologue label
3089 cmp %r10,%rbx # context->Rip<prologue label
3090 jb .Lcommon_seh_tail
3092 mov 192($context),%rax # pull context->R9
3094 mov 4(%r11),%r10d # HandlerData[1]
3095 lea (%rsi,%r10),%r10 # epilogue label
3096 cmp %r10,%rbx # context->Rip>=epilogue label
3097 jae .Lcommon_seh_tail
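	# fault inside the body: copy the two XMM registers saved by the
	# prologue at -0x28(%r9) (xmm6-7) back into the CONTEXT Xmm area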
3099 lea -0x28(%rax),%rsi
3100 lea 512($context),%rdi # &context.Xmm6
3102 .long 0xa548f3fc # cld; rep movsq
3104 jmp .Lcommon_seh_tail
3105 .size ssse3_handler,.-ssse3_handler
3107 .type full_handler,\@abi-omnipotent
3121 mov 120($context),%rax # pull context->Rax
3122 mov 248($context),%rbx # pull context->Rip
3124 mov 8($disp),%rsi # disp->ImageBase
3125 mov 56($disp),%r11 # disp->HandlerData
3127 mov 0(%r11),%r10d # HandlerData[0]
3128 lea (%rsi,%r10),%r10 # prologue label
3129 cmp %r10,%rbx # context->Rip<prologue label
3130 jb .Lcommon_seh_tail
3132 mov 192($context),%rax # pull context->R9
3134 mov 4(%r11),%r10d # HandlerData[1]
3135 lea (%rsi,%r10),%r10 # epilogue label
3136 cmp %r10,%rbx # context->Rip>=epilogue label
3137 jae .Lcommon_seh_tail
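	# fault inside the body: copy the ten XMM registers saved by the wider
	# SIMD prologues at -0xa8(%r9) (xmm6-15) back into the CONTEXT Xmm area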
3139 lea -0xa8(%rax),%rsi
3140 lea 512($context),%rdi # &context.Xmm6
3142 .long 0xa548f3fc # cld; rep movsq
3144 jmp .Lcommon_seh_tail
3145 .size full_handler,.-full_handler
3149 .rva .LSEH_begin_ChaCha20_ctr32
3150 .rva .LSEH_end_ChaCha20_ctr32
3151 .rva .LSEH_info_ChaCha20_ctr32
3153 .rva .LSEH_begin_ChaCha20_ssse3
3154 .rva .LSEH_end_ChaCha20_ssse3
3155 .rva .LSEH_info_ChaCha20_ssse3
3157 .rva .LSEH_begin_ChaCha20_4x
3158 .rva .LSEH_end_ChaCha20_4x
3159 .rva .LSEH_info_ChaCha20_4x
3161 $code.=<<___ if ($avx);
3162 .rva .LSEH_begin_ChaCha20_4xop
3163 .rva .LSEH_end_ChaCha20_4xop
3164 .rva .LSEH_info_ChaCha20_4xop
3166 $code.=<<___ if ($avx>1);
3167 .rva .LSEH_begin_ChaCha20_8x
3168 .rva .LSEH_end_ChaCha20_8x
3169 .rva .LSEH_info_ChaCha20_8x
3171 $code.=<<___ if ($avx>2);
3172 .rva .LSEH_begin_ChaCha20_avx512
3173 .rva .LSEH_end_ChaCha20_avx512
3174 .rva .LSEH_info_ChaCha20_avx512
3176 .rva .LSEH_begin_ChaCha20_16x
3177 .rva .LSEH_end_ChaCha20_16x
3178 .rva .LSEH_info_ChaCha20_16x
3183 .LSEH_info_ChaCha20_ctr32:
3187 .LSEH_info_ChaCha20_ssse3:
3190 .rva .Lssse3_body,.Lssse3_epilogue
3192 .LSEH_info_ChaCha20_4x:
3195 .rva .L4x_body,.L4x_epilogue
3197 $code.=<<___ if ($avx);
3198 .LSEH_info_ChaCha20_4xop:
3201 .rva .L4xop_body,.L4xop_epilogue # HandlerData[]