2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # ChaCha20 for x86_64.
23 # Add AVX512F code path.
25 # Performance in cycles per byte out of large buffer.
27 # IALU/gcc 4.8(i) 1xSSSE3/SSE2 4xSSSE3 8xAVX2
29 # P4 9.48/+99% -/22.7(ii) -
30 # Core2 7.83/+55% 7.90/8.08 4.35
31 # Westmere 7.19/+50% 5.60/6.70 3.00
32 # Sandy Bridge 8.31/+42% 5.45/6.76 2.72
33 # Ivy Bridge 6.71/+46% 5.40/6.49 2.41
34 # Haswell 5.92/+43% 5.20/6.45 2.42 1.23
35 # Skylake 5.87/+39% 4.70/- 2.31 1.19
36 # Silvermont 12.0/+33% 7.75/7.40 7.03(iii)
37 # Goldmont 10.6/+17% 5.10/- 3.28
38 # Sledgehammer 7.28/+52% -/14.2(ii) -
39 # Bulldozer 9.66/+28% 9.85/11.1 3.06(iv)
40 # VIA Nano 10.5/+46% 6.72/8.60 6.05
42 # (i) compared to older gcc 3.x one can observe a >2x improvement on
43 # most platforms;
44 # (ii) as can be seen, SSE2 performance is too low on legacy
45 # processors; NxSSE2 results are naturally better, but not
46 # impressively better than IALU ones, which is why you won't
47 # find SSE2 code below;
48 # (iii) this is not an optimal result for Atom because of MSROM
49 # limitations; SSE2 can do better, but the gain is considered too
50 # low to justify the [maintenance] effort;
51 # (iv) Bulldozer actually executes 4xXOP code path that delivers 2.20;
55 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
57 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
59 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
60 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
61 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
62 die "can't locate x86_64-xlate.pl";
64 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
65 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
66 $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
69 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
70 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
71 $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
72 $avx += 1 if ($1==2.11 && $2>=8);
75 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
76 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
77 $avx = ($1>=10) + ($1>=11);
80 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
81 $avx = ($2>=3.0) + ($2>3.0);
84 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
87 # input parameter block
88 ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
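#
# The five arguments map onto the C prototype used by the rest of
# OpenSSL, which is roughly:
#
#	void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#	                    size_t len, const unsigned int key[8],
#	                    const unsigned int counter[4]);
#
# with counter[0] being the 32-bit block counter and counter[1..3]
# carrying the nonce.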
93 .extern OPENSSL_ia32cap_P
105 .long 0,2,4,6,1,3,5,7
107 .long 8,8,8,8,8,8,8,8
109 .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
111 .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
113 .asciz "expand 32-byte k"
116 .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
118 .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
120 .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
122 .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
123 .asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
126 sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
127 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
129 $arg = "\$$arg" if ($arg*1 eq $arg);
130 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
133 @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
134 "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
137 sub ROUND { # critical path is 24 cycles per round
138 my ($a0,$b0,$c0,$d0)=@_;
139 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
140 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
141 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
142 my ($xc,$xc_)=map("\"$_\"",@t);
143 my @x=map("\"$_\"",@x);
145 # Consider the order in which variables are addressed by their index:
150 # 0 4 8 12 < even round
154 # 0 5 10 15 < odd round
159 # 'a', 'b' and 'd's are permanently allocated in registers,
160 # @x[0..7,12..15], while 'c's are maintained in memory. If
161 # you observe the 'c' column, you'll notice that a pair of 'c's
162 # stays invariant between rounds. This means that we have to
163 # reload them only once per round, in the middle. This is why
164 # you'll see a bunch of 'c' stores and loads in the middle, but
165 # none at the beginning or end.
167 # Normally instructions would be interleaved to favour in-order
168 # execution. Generally out-of-order cores manage it gracefully,
169 # but not this time for some reason. As in-order execution
170 # cores are a dying breed and old Atom is the only one around,
171 # the instructions are left uninterleaved. Besides, Atom is
172 # better off executing 1xSSSE3 code anyway...
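#
# For reference, each of the Q1-Q4 groups below is one ChaCha
# quarter-round, which in plain C (with ROL32 a 32-bit rotate-left)
# reads:
#
#	a += b; d ^= a; d = ROL32(d,16);
#	c += d; b ^= c; b = ROL32(b,12);
#	a += b; d ^= a; d = ROL32(d, 8);
#	c += d; b ^= c; b = ROL32(b, 7);
#
# Two quarter-rounds are interleaved at a time (Q1 with Q2, Q3 with
# Q4) to break up the dependency chains.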
175 "&add (@x[$a0],@x[$b0])", # Q1
176 "&xor (@x[$d0],@x[$a0])",
178 "&add (@x[$a1],@x[$b1])", # Q2
179 "&xor (@x[$d1],@x[$a1])",
182 "&add ($xc,@x[$d0])",
183 "&xor (@x[$b0],$xc)",
185 "&add ($xc_,@x[$d1])",
186 "&xor (@x[$b1],$xc_)",
189 "&add (@x[$a0],@x[$b0])",
190 "&xor (@x[$d0],@x[$a0])",
192 "&add (@x[$a1],@x[$b1])",
193 "&xor (@x[$d1],@x[$a1])",
196 "&add ($xc,@x[$d0])",
197 "&xor (@x[$b0],$xc)",
199 "&add ($xc_,@x[$d1])",
200 "&xor (@x[$b1],$xc_)",
203 "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
204 "&mov (\"4*$c1(%rsp)\",$xc_)",
205 "&mov ($xc,\"4*$c2(%rsp)\")",
206 "&mov ($xc_,\"4*$c3(%rsp)\")",
208 "&add (@x[$a2],@x[$b2])", # Q3
209 "&xor (@x[$d2],@x[$a2])",
211 "&add (@x[$a3],@x[$b3])", # Q4
212 "&xor (@x[$d3],@x[$a3])",
215 "&add ($xc,@x[$d2])",
216 "&xor (@x[$b2],$xc)",
218 "&add ($xc_,@x[$d3])",
219 "&xor (@x[$b3],$xc_)",
222 "&add (@x[$a2],@x[$b2])",
223 "&xor (@x[$d2],@x[$a2])",
225 "&add (@x[$a3],@x[$b3])",
226 "&xor (@x[$d3],@x[$a3])",
229 "&add ($xc,@x[$d2])",
230 "&xor (@x[$b2],$xc)",
232 "&add ($xc_,@x[$d3])",
233 "&xor (@x[$b3],$xc_)",
238 ########################################################################
239 # Generic code path that handles all lengths on pre-SSSE3 processors.
241 .globl ChaCha20_ctr32
242 .type ChaCha20_ctr32,\@function,5
248 mov OPENSSL_ia32cap_P+4(%rip),%r10
250 $code.=<<___ if ($avx>2);
251 bt \$48,%r10 # check for AVX512F
255 test \$`1<<(41-32)`,%r10d
271 .cfi_adjust_cfa_offset 64+24
274 #movdqa .Lsigma(%rip),%xmm0
276 movdqu 16($key),%xmm2
277 movdqu ($counter),%xmm3
278 movdqa .Lone(%rip),%xmm4
280 #movdqa %xmm0,4*0(%rsp) # key[0]
281 movdqa %xmm1,4*4(%rsp) # key[1]
282 movdqa %xmm2,4*8(%rsp) # key[2]
283 movdqa %xmm3,4*12(%rsp) # key[3]
284 mov $len,%rbp # reassign $len
289 mov \$0x61707865,@x[0] # 'expa'
290 mov \$0x3320646e,@x[1] # 'nd 3'
291 mov \$0x79622d32,@x[2] # '2-by'
292 mov \$0x6b206574,@x[3] # 'te k'
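# These four immediates are the ASCII constant "expand 32-byte k"
# (.Lsigma) read as little-endian 32-bit words; presumably the
# constant row is rematerialized with mov rather than kept on the
# stack, note the commented-out key[0] store above.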
298 mov 4*13(%rsp),@x[13]
299 mov 4*14(%rsp),@x[14]
300 mov 4*15(%rsp),@x[15]
302 mov %rbp,64+0(%rsp) # save len
304 mov $inp,64+8(%rsp) # save inp
305 movq %xmm2,%rsi # "@x[8]"
306 mov $out,64+16(%rsp) # save out
308 shr \$32,%rdi # "@x[9]"
314 foreach (&ROUND (0, 4, 8,12)) { eval; }
315 foreach (&ROUND (0, 5,10,15)) { eval; }
320 mov @t[1],4*9(%rsp) # modulo-scheduled
322 mov 64(%rsp),%rbp # load len
324 mov 64+8(%rsp),$inp # load inp
325 paddd %xmm4,%xmm3 # increment counter
326 mov 64+16(%rsp),$out # load out
328 add \$0x61707865,@x[0] # 'expa'
329 add \$0x3320646e,@x[1] # 'nd 3'
330 add \$0x79622d32,@x[2] # '2-by'
331 add \$0x6b206574,@x[3] # 'te k'
336 add 4*12(%rsp),@x[12]
337 add 4*13(%rsp),@x[13]
338 add 4*14(%rsp),@x[14]
339 add 4*15(%rsp),@x[15]
340 paddd 4*8(%rsp),%xmm1
345 xor 4*0($inp),@x[0] # xor with input
353 movdqu 4*8($inp),%xmm0
354 xor 4*12($inp),@x[12]
355 xor 4*13($inp),@x[13]
356 xor 4*14($inp),@x[14]
357 xor 4*15($inp),@x[15]
358 lea 4*16($inp),$inp # inp+=64
361 movdqa %xmm2,4*8(%rsp)
362 movd %xmm3,4*12(%rsp)
364 mov @x[0],4*0($out) # write output
372 movdqu %xmm0,4*8($out)
373 mov @x[12],4*12($out)
374 mov @x[13],4*13($out)
375 mov @x[14],4*14($out)
376 mov @x[15],4*15($out)
377 lea 4*16($out),$out # out+=64
395 movdqa %xmm1,4*8(%rsp)
396 mov @x[12],4*12(%rsp)
397 mov @x[13],4*13(%rsp)
398 mov @x[14],4*14(%rsp)
399 mov @x[15],4*15(%rsp)
402 movzb ($inp,%rbx),%eax
403 movzb (%rsp,%rbx),%edx
406 mov %al,-1($out,%rbx)
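#
# The tail loop above applies the last partial block byte by byte:
# the keystream was stored to the stack, and the net effect is roughly
#
#	for (i = 0; i < len; i++) out[i] = inp[i] ^ ks[i];
#
# with ks being the keystream block at (%rsp).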
411 lea 64+24+48(%rsp),%rsi
426 .cfi_def_cfa_register %rsp
430 .size ChaCha20_ctr32,.-ChaCha20_ctr32
433 ########################################################################
434 # SSSE3 code path that handles shorter lengths
436 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
438 sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
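# SSE has no 32-bit vector rotate. The 16- and 8-bit rotates are done
# with a single pshufb through the .Lrot16/.Lrot24 byte-shuffle masks,
# while the 12- and 7-bit ones fall back to shift-shift-or emulation;
# in intrinsics terms, roughly:
#
#	d = _mm_shuffle_epi8(d, rot16);		/* d = ROL32(d,16) */
#	t = _mm_slli_epi32(b, 12);
#	b = _mm_srli_epi32(b, 20);
#	b = _mm_or_si128(b, t);			/* b = ROL32(b,12) */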
462 my $xframe = $win64 ? 32+8 : 8;
465 .type ChaCha20_ssse3,\@function,5
470 mov %rsp,%r9 # frame pointer
471 .cfi_def_cfa_register %r9
473 $code.=<<___ if ($avx);
474 test \$`1<<(43-32)`,%r10d
475 jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
478 cmp \$128,$len # we might throw away some data,
479 ja .LChaCha20_4x # but overall it won't be slower
482 sub \$64+$xframe,%rsp
484 $code.=<<___ if ($win64);
485 movaps %xmm6,-0x28(%r9)
486 movaps %xmm7,-0x18(%r9)
490 movdqa .Lsigma(%rip),$a
494 movdqa .Lrot16(%rip),$rot16
495 movdqa .Lrot24(%rip),$rot24
501 mov \$10,$counter # reuse $counter
506 movdqa .Lone(%rip),$d
519 &pshufd ($c,$c,0b01001110);
520 &pshufd ($b,$b,0b00111001);
521 &pshufd ($d,$d,0b10010011);
525 &pshufd ($c,$c,0b01001110);
526 &pshufd ($b,$b,0b10010011);
527 &pshufd ($d,$d,0b00111001);
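# The pshufd triples above rotate the 'c', 'b' and 'd' rows within
# their registers by two, one and three dword positions respectively,
# so the same quarter-round code alternately processes columns
# (0,4,8,12 ...) and diagonals (0,5,10,15 ...); the second triple
# undoes the first.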
530 &jnz (".Loop_ssse3");
542 movdqu 0x10($inp),$t1
543 pxor $t,$a # xor with input
546 movdqu 0x30($inp),$t1
547 lea 0x40($inp),$inp # inp+=64
551 movdqu $a,0x00($out) # write output
555 lea 0x40($out),$out # out+=64
558 jnz .Loop_outer_ssse3
568 xor $counter,$counter
571 movzb ($inp,$counter),%eax
572 movzb (%rsp,$counter),%ecx
573 lea 1($counter),$counter
575 mov %al,-1($out,$counter)
581 $code.=<<___ if ($win64);
582 movaps -0x28(%r9),%xmm6
583 movaps -0x18(%r9),%xmm7
587 .cfi_def_cfa_register %rsp
591 .size ChaCha20_ssse3,.-ChaCha20_ssse3
595 ########################################################################
596 # SSSE3 code path that handles longer messages.
598 # assign variables to favor Atom front-end
599 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
600 $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
601 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
602 "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
604 sub SSSE3_lane_ROUND {
605 my ($a0,$b0,$c0,$d0)=@_;
606 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
607 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
608 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
609 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
610 my @x=map("\"$_\"",@xx);
612 # Consider the order in which variables are addressed by their index:
617 # 0 4 8 12 < even round
621 # 0 5 10 15 < odd round
626 # 'a', 'b' and 'd's are permanently allocated in registers,
627 # @x[0..7,12..15], while 'c's are maintained in memory. If
628 # you observe the 'c' column, you'll notice that a pair of 'c's
629 # stays invariant between rounds. This means that we have to
630 # reload them only once per round, in the middle. This is why
631 # you'll see a bunch of 'c' stores and loads in the middle, but
632 # none at the beginning or end.
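#
# Note that $t0/$t1 double as rotate temporaries and as holders of the
# .Lrot16/.Lrot24 pshufb masks, which is why the masks keep being
# re-fetched from (%r10)/(%r11) below.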
635 "&paddd (@x[$a0],@x[$b0])", # Q1
636 "&paddd (@x[$a1],@x[$b1])", # Q2
637 "&pxor (@x[$d0],@x[$a0])",
638 "&pxor (@x[$d1],@x[$a1])",
639 "&pshufb (@x[$d0],$t1)",
640 "&pshufb (@x[$d1],$t1)",
642 "&paddd ($xc,@x[$d0])",
643 "&paddd ($xc_,@x[$d1])",
644 "&pxor (@x[$b0],$xc)",
645 "&pxor (@x[$b1],$xc_)",
646 "&movdqa ($t0,@x[$b0])",
647 "&pslld (@x[$b0],12)",
649 "&movdqa ($t1,@x[$b1])",
650 "&pslld (@x[$b1],12)",
651 "&por (@x[$b0],$t0)",
653 "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
654 "&por (@x[$b1],$t1)",
656 "&paddd (@x[$a0],@x[$b0])",
657 "&paddd (@x[$a1],@x[$b1])",
658 "&pxor (@x[$d0],@x[$a0])",
659 "&pxor (@x[$d1],@x[$a1])",
660 "&pshufb (@x[$d0],$t0)",
661 "&pshufb (@x[$d1],$t0)",
663 "&paddd ($xc,@x[$d0])",
664 "&paddd ($xc_,@x[$d1])",
665 "&pxor (@x[$b0],$xc)",
666 "&pxor (@x[$b1],$xc_)",
667 "&movdqa ($t1,@x[$b0])",
668 "&pslld (@x[$b0],7)",
670 "&movdqa ($t0,@x[$b1])",
671 "&pslld (@x[$b1],7)",
672 "&por (@x[$b0],$t1)",
674 "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
675 "&por (@x[$b1],$t0)",
677 "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
678 "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
679 "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
680 "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
682 "&paddd (@x[$a2],@x[$b2])", # Q3
683 "&paddd (@x[$a3],@x[$b3])", # Q4
684 "&pxor (@x[$d2],@x[$a2])",
685 "&pxor (@x[$d3],@x[$a3])",
686 "&pshufb (@x[$d2],$t1)",
687 "&pshufb (@x[$d3],$t1)",
689 "&paddd ($xc,@x[$d2])",
690 "&paddd ($xc_,@x[$d3])",
691 "&pxor (@x[$b2],$xc)",
692 "&pxor (@x[$b3],$xc_)",
693 "&movdqa ($t0,@x[$b2])",
694 "&pslld (@x[$b2],12)",
696 "&movdqa ($t1,@x[$b3])",
697 "&pslld (@x[$b3],12)",
698 "&por (@x[$b2],$t0)",
700 "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
701 "&por (@x[$b3],$t1)",
703 "&paddd (@x[$a2],@x[$b2])",
704 "&paddd (@x[$a3],@x[$b3])",
705 "&pxor (@x[$d2],@x[$a2])",
706 "&pxor (@x[$d3],@x[$a3])",
707 "&pshufb (@x[$d2],$t0)",
708 "&pshufb (@x[$d3],$t0)",
710 "&paddd ($xc,@x[$d2])",
711 "&paddd ($xc_,@x[$d3])",
712 "&pxor (@x[$b2],$xc)",
713 "&pxor (@x[$b3],$xc_)",
714 "&movdqa ($t1,@x[$b2])",
715 "&pslld (@x[$b2],7)",
717 "&movdqa ($t0,@x[$b3])",
718 "&pslld (@x[$b3],7)",
719 "&por (@x[$b2],$t1)",
721 "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
726 my $xframe = $win64 ? 0xa8 : 8;
729 .type ChaCha20_4x,\@function,5
734 mov %rsp,%r9 # frame pointer
735 .cfi_def_cfa_register %r9
738 $code.=<<___ if ($avx>1);
739 shr \$32,%r10 # OPENSSL_ia32cap_P+8
740 test \$`1<<5`,%r10 # test AVX2
747 and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
748 cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
749 je .Ldo_sse3_after_all # to detect Atom
752 sub \$0x140+$xframe,%rsp
754 ################ stack layout
755 # +0x00 SIMD equivalent of @x[8-11]
757 # +0x40 constant copy of key[0-2] smashed by lanes
759 # +0x100 SIMD counters (with nonce smashed by lanes)
762 $code.=<<___ if ($win64);
763 movaps %xmm6,-0xa8(%r9)
764 movaps %xmm7,-0x98(%r9)
765 movaps %xmm8,-0x88(%r9)
766 movaps %xmm9,-0x78(%r9)
767 movaps %xmm10,-0x68(%r9)
768 movaps %xmm11,-0x58(%r9)
769 movaps %xmm12,-0x48(%r9)
770 movaps %xmm13,-0x38(%r9)
771 movaps %xmm14,-0x28(%r9)
772 movaps %xmm15,-0x18(%r9)
776 movdqa .Lsigma(%rip),$xa3 # key[0]
777 movdqu ($key),$xb3 # key[1]
778 movdqu 16($key),$xt3 # key[2]
779 movdqu ($counter),$xd3 # key[3]
780 lea 0x100(%rsp),%rcx # size optimization
781 lea .Lrot16(%rip),%r10
782 lea .Lrot24(%rip),%r11
784 pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
785 pshufd \$0x55,$xa3,$xa1
786 movdqa $xa0,0x40(%rsp) # ... and offload
787 pshufd \$0xaa,$xa3,$xa2
788 movdqa $xa1,0x50(%rsp)
789 pshufd \$0xff,$xa3,$xa3
790 movdqa $xa2,0x60(%rsp)
791 movdqa $xa3,0x70(%rsp)
793 pshufd \$0x00,$xb3,$xb0
794 pshufd \$0x55,$xb3,$xb1
795 movdqa $xb0,0x80-0x100(%rcx)
796 pshufd \$0xaa,$xb3,$xb2
797 movdqa $xb1,0x90-0x100(%rcx)
798 pshufd \$0xff,$xb3,$xb3
799 movdqa $xb2,0xa0-0x100(%rcx)
800 movdqa $xb3,0xb0-0x100(%rcx)
802 pshufd \$0x00,$xt3,$xt0 # "$xc0"
803 pshufd \$0x55,$xt3,$xt1 # "$xc1"
804 movdqa $xt0,0xc0-0x100(%rcx)
805 pshufd \$0xaa,$xt3,$xt2 # "$xc2"
806 movdqa $xt1,0xd0-0x100(%rcx)
807 pshufd \$0xff,$xt3,$xt3 # "$xc3"
808 movdqa $xt2,0xe0-0x100(%rcx)
809 movdqa $xt3,0xf0-0x100(%rcx)
811 pshufd \$0x00,$xd3,$xd0
812 pshufd \$0x55,$xd3,$xd1
813 paddd .Linc(%rip),$xd0 # don't save counters yet
814 pshufd \$0xaa,$xd3,$xd2
815 movdqa $xd1,0x110-0x100(%rcx)
816 pshufd \$0xff,$xd3,$xd3
817 movdqa $xd2,0x120-0x100(%rcx)
818 movdqa $xd3,0x130-0x100(%rcx)
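#
# At this point every register (or its stack copy) holds one state
# word replicated across all four lanes, lane j belonging to block j,
# and .Linc has just offset the counter lanes by 0,1,2,3 so the four
# blocks are consecutive. Conceptually:
#
#	xa0 = { x0, x0, x0, x0 }	# word 0 of blocks 0..3
#	xd0 = { ctr+0, ctr+1, ctr+2, ctr+3 }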
824 movdqa 0x40(%rsp),$xa0 # re-load smashed key
825 movdqa 0x50(%rsp),$xa1
826 movdqa 0x60(%rsp),$xa2
827 movdqa 0x70(%rsp),$xa3
828 movdqa 0x80-0x100(%rcx),$xb0
829 movdqa 0x90-0x100(%rcx),$xb1
830 movdqa 0xa0-0x100(%rcx),$xb2
831 movdqa 0xb0-0x100(%rcx),$xb3
832 movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
833 movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
834 movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
835 movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
836 movdqa 0x100-0x100(%rcx),$xd0
837 movdqa 0x110-0x100(%rcx),$xd1
838 movdqa 0x120-0x100(%rcx),$xd2
839 movdqa 0x130-0x100(%rcx),$xd3
840 paddd .Lfour(%rip),$xd0 # next SIMD counters
843 movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
844 movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
845 movdqa (%r10),$xt3 # .Lrot16(%rip)
847 movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
853 foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
854 foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
859 paddd 0x40(%rsp),$xa0 # accumulate key material
860 paddd 0x50(%rsp),$xa1
861 paddd 0x60(%rsp),$xa2
862 paddd 0x70(%rsp),$xa3
864 movdqa $xa0,$xt2 # "de-interlace" data
871 punpcklqdq $xa2,$xa0 # "a0"
873 punpcklqdq $xt3,$xt2 # "a2"
874 punpckhqdq $xa2,$xa1 # "a1"
875 punpckhqdq $xt3,$xa3 # "a3"
877 ($xa2,$xt2)=($xt2,$xa2);
879 paddd 0x80-0x100(%rcx),$xb0
880 paddd 0x90-0x100(%rcx),$xb1
881 paddd 0xa0-0x100(%rcx),$xb2
882 paddd 0xb0-0x100(%rcx),$xb3
884 movdqa $xa0,0x00(%rsp) # offload $xaN
885 movdqa $xa1,0x10(%rsp)
886 movdqa 0x20(%rsp),$xa0 # "xc2"
887 movdqa 0x30(%rsp),$xa1 # "xc3"
896 punpcklqdq $xb2,$xb0 # "b0"
898 punpcklqdq $xt3,$xt2 # "b2"
899 punpckhqdq $xb2,$xb1 # "b1"
900 punpckhqdq $xt3,$xb3 # "b3"
902 ($xb2,$xt2)=($xt2,$xb2);
903 my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
905 paddd 0xc0-0x100(%rcx),$xc0
906 paddd 0xd0-0x100(%rcx),$xc1
907 paddd 0xe0-0x100(%rcx),$xc2
908 paddd 0xf0-0x100(%rcx),$xc3
910 movdqa $xa2,0x20(%rsp) # keep offloading $xaN
911 movdqa $xa3,0x30(%rsp)
920 punpcklqdq $xc2,$xc0 # "c0"
922 punpcklqdq $xt3,$xt2 # "c2"
923 punpckhqdq $xc2,$xc1 # "c1"
924 punpckhqdq $xt3,$xc3 # "c3"
926 ($xc2,$xt2)=($xt2,$xc2);
927 ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
929 paddd 0x100-0x100(%rcx),$xd0
930 paddd 0x110-0x100(%rcx),$xd1
931 paddd 0x120-0x100(%rcx),$xd2
932 paddd 0x130-0x100(%rcx),$xd3
941 punpcklqdq $xd2,$xd0 # "d0"
943 punpcklqdq $xt3,$xt2 # "d2"
944 punpckhqdq $xd2,$xd1 # "d1"
945 punpckhqdq $xt3,$xd3 # "d3"
947 ($xd2,$xt2)=($xt2,$xd2);
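# The punpck{l,h}dq/punpck{l,h}qdq sequences above amount to 4x4 dword
# transposes: they turn the lane-sliced layout (one state word of four
# blocks per register) back into four contiguous 64-byte keystream
# blocks, ready to be xored against the input stream below.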
952 movdqu 0x00($inp),$xt0 # xor with input
953 movdqu 0x10($inp),$xt1
954 movdqu 0x20($inp),$xt2
955 movdqu 0x30($inp),$xt3
956 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
961 movdqu $xt0,0x00($out)
962 movdqu 0x40($inp),$xt0
963 movdqu $xt1,0x10($out)
964 movdqu 0x50($inp),$xt1
965 movdqu $xt2,0x20($out)
966 movdqu 0x60($inp),$xt2
967 movdqu $xt3,0x30($out)
968 movdqu 0x70($inp),$xt3
969 lea 0x80($inp),$inp # size optimization
975 movdqu $xt0,0x40($out)
976 movdqu 0x00($inp),$xt0
977 movdqu $xt1,0x50($out)
978 movdqu 0x10($inp),$xt1
979 movdqu $xt2,0x60($out)
980 movdqu 0x20($inp),$xt2
981 movdqu $xt3,0x70($out)
982 lea 0x80($out),$out # size optimization
983 movdqu 0x30($inp),$xt3
989 movdqu $xt0,0x00($out)
990 movdqu 0x40($inp),$xt0
991 movdqu $xt1,0x10($out)
992 movdqu 0x50($inp),$xt1
993 movdqu $xt2,0x20($out)
994 movdqu 0x60($inp),$xt2
995 movdqu $xt3,0x30($out)
996 movdqu 0x70($inp),$xt3
997 lea 0x80($inp),$inp # inp+=64*4
1002 movdqu $xt0,0x40($out)
1003 movdqu $xt1,0x50($out)
1004 movdqu $xt2,0x60($out)
1005 movdqu $xt3,0x70($out)
1006 lea 0x80($out),$out # out+=64*4
1021 #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1023 #movdqa $xt0,0x00(%rsp)
1024 movdqa $xb0,0x10(%rsp)
1025 movdqa $xc0,0x20(%rsp)
1026 movdqa $xd0,0x30(%rsp)
1031 movdqu 0x00($inp),$xt0 # xor with input
1032 movdqu 0x10($inp),$xt1
1033 movdqu 0x20($inp),$xt2
1034 movdqu 0x30($inp),$xt3
1035 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1039 movdqu $xt0,0x00($out)
1040 movdqu $xt1,0x10($out)
1041 movdqu $xt2,0x20($out)
1042 movdqu $xt3,0x30($out)
1045 movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
1046 lea 0x40($inp),$inp # inp+=64*1
1048 movdqa $xt0,0x00(%rsp)
1049 movdqa $xb1,0x10(%rsp)
1050 lea 0x40($out),$out # out+=64*1
1051 movdqa $xc1,0x20(%rsp)
1052 sub \$64,$len # len-=64*1
1053 movdqa $xd1,0x30(%rsp)
1058 movdqu 0x00($inp),$xt0 # xor with input
1059 movdqu 0x10($inp),$xt1
1060 movdqu 0x20($inp),$xt2
1061 movdqu 0x30($inp),$xt3
1062 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1067 movdqu $xt0,0x00($out)
1068 movdqu 0x40($inp),$xt0
1069 movdqu $xt1,0x10($out)
1070 movdqu 0x50($inp),$xt1
1071 movdqu $xt2,0x20($out)
1072 movdqu 0x60($inp),$xt2
1073 movdqu $xt3,0x30($out)
1074 movdqu 0x70($inp),$xt3
1075 pxor 0x10(%rsp),$xt0
1079 movdqu $xt0,0x40($out)
1080 movdqu $xt1,0x50($out)
1081 movdqu $xt2,0x60($out)
1082 movdqu $xt3,0x70($out)
1085 movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
1086 lea 0x80($inp),$inp # inp+=64*2
1088 movdqa $xt0,0x00(%rsp)
1089 movdqa $xb2,0x10(%rsp)
1090 lea 0x80($out),$out # out+=64*2
1091 movdqa $xc2,0x20(%rsp)
1092 sub \$128,$len # len-=64*2
1093 movdqa $xd2,0x30(%rsp)
1098 movdqu 0x00($inp),$xt0 # xor with input
1099 movdqu 0x10($inp),$xt1
1100 movdqu 0x20($inp),$xt2
1101 movdqu 0x30($inp),$xt3
1102 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1107 movdqu $xt0,0x00($out)
1108 movdqu 0x40($inp),$xt0
1109 movdqu $xt1,0x10($out)
1110 movdqu 0x50($inp),$xt1
1111 movdqu $xt2,0x20($out)
1112 movdqu 0x60($inp),$xt2
1113 movdqu $xt3,0x30($out)
1114 movdqu 0x70($inp),$xt3
1115 lea 0x80($inp),$inp # size optimization
1116 pxor 0x10(%rsp),$xt0
1121 movdqu $xt0,0x40($out)
1122 movdqu 0x00($inp),$xt0
1123 movdqu $xt1,0x50($out)
1124 movdqu 0x10($inp),$xt1
1125 movdqu $xt2,0x60($out)
1126 movdqu 0x20($inp),$xt2
1127 movdqu $xt3,0x70($out)
1128 lea 0x80($out),$out # size optimization
1129 movdqu 0x30($inp),$xt3
1130 pxor 0x20(%rsp),$xt0
1134 movdqu $xt0,0x00($out)
1135 movdqu $xt1,0x10($out)
1136 movdqu $xt2,0x20($out)
1137 movdqu $xt3,0x30($out)
1140 movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
1141 lea 0x40($inp),$inp # inp+=64*3
1143 movdqa $xt0,0x00(%rsp)
1144 movdqa $xb3,0x10(%rsp)
1145 lea 0x40($out),$out # out+=64*3
1146 movdqa $xc3,0x20(%rsp)
1147 sub \$192,$len # len-=64*3
1148 movdqa $xd3,0x30(%rsp)
1151 movzb ($inp,%r10),%eax
1152 movzb (%rsp,%r10),%ecx
1155 mov %al,-1($out,%r10)
1161 $code.=<<___ if ($win64);
1162 movaps -0xa8(%r9),%xmm6
1163 movaps -0x98(%r9),%xmm7
1164 movaps -0x88(%r9),%xmm8
1165 movaps -0x78(%r9),%xmm9
1166 movaps -0x68(%r9),%xmm10
1167 movaps -0x58(%r9),%xmm11
1168 movaps -0x48(%r9),%xmm12
1169 movaps -0x38(%r9),%xmm13
1170 movaps -0x28(%r9),%xmm14
1171 movaps -0x18(%r9),%xmm15
1175 .cfi_def_cfa_register %rsp
1179 .size ChaCha20_4x,.-ChaCha20_4x
1183 ########################################################################
1184 # XOP code path that handles all lengths.
1186 # There is some "anomaly" observed depending on instructions' size or
1187 # alignment. If you look closely at the code below you'll notice that
1188 # the argument order sometimes varies. Swapping the arguments makes the
1189 # instruction encoding larger, and such fiddling gives a 5% performance
1190 # improvement. This is on FX-4100...
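#
# XOP also provides a genuine vector rotate, vprotd, so unlike the
# SSSE3 path no pshufb masks and no shift-and-or emulation are needed:
# each of the four rotate amounts (16, 12, 8, 7) is a single
# instruction, which is what makes this path the fastest choice on
# Bulldozer, see note (iv) above.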
1192 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1193 $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
1194 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1195 $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
1197 sub XOP_lane_ROUND {
1198 my ($a0,$b0,$c0,$d0)=@_;
1199 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1200 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1201 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1202 my @x=map("\"$_\"",@xx);
1205 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
1206 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
1207 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
1208 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
1209 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1210 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1211 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1212 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1213 "&vprotd (@x[$d0],@x[$d0],16)",
1214 "&vprotd (@x[$d1],@x[$d1],16)",
1215 "&vprotd (@x[$d2],@x[$d2],16)",
1216 "&vprotd (@x[$d3],@x[$d3],16)",
1218 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
1219 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
1220 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
1221 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
1222 "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
1223 "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
1224 "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
1225 "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
1226 "&vprotd (@x[$b0],@x[$b0],12)",
1227 "&vprotd (@x[$b1],@x[$b1],12)",
1228 "&vprotd (@x[$b2],@x[$b2],12)",
1229 "&vprotd (@x[$b3],@x[$b3],12)",
1231 "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
1232 "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
1233 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
1234 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
1235 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1236 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1237 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1238 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1239 "&vprotd (@x[$d0],@x[$d0],8)",
1240 "&vprotd (@x[$d1],@x[$d1],8)",
1241 "&vprotd (@x[$d2],@x[$d2],8)",
1242 "&vprotd (@x[$d3],@x[$d3],8)",
1244 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
1245 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
1246 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
1247 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
1248 "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
1249 "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
1250 "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
1251 "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
1252 "&vprotd (@x[$b0],@x[$b0],7)",
1253 "&vprotd (@x[$b1],@x[$b1],7)",
1254 "&vprotd (@x[$b2],@x[$b2],7)",
1255 "&vprotd (@x[$b3],@x[$b3],7)"
1259 my $xframe = $win64 ? 0xa8 : 8;
1262 .type ChaCha20_4xop,\@function,5
1267 mov %rsp,%r9 # frame pointer
1268 .cfi_def_cfa_register %r9
1269 sub \$0x140+$xframe,%rsp
1271 ################ stack layout
1272 # +0x00 SIMD equivalent of @x[8-11]
1274 # +0x40 constant copy of key[0-2] smashed by lanes
1276 # +0x100 SIMD counters (with nonce smashed by lanes)
1279 $code.=<<___ if ($win64);
1280 movaps %xmm6,-0xa8(%r9)
1281 movaps %xmm7,-0x98(%r9)
1282 movaps %xmm8,-0x88(%r9)
1283 movaps %xmm9,-0x78(%r9)
1284 movaps %xmm10,-0x68(%r9)
1285 movaps %xmm11,-0x58(%r9)
1286 movaps %xmm12,-0x48(%r9)
1287 movaps %xmm13,-0x38(%r9)
1288 movaps %xmm14,-0x28(%r9)
1289 movaps %xmm15,-0x18(%r9)
1295 vmovdqa .Lsigma(%rip),$xa3 # key[0]
1296 vmovdqu ($key),$xb3 # key[1]
1297 vmovdqu 16($key),$xt3 # key[2]
1298 vmovdqu ($counter),$xd3 # key[3]
1299 lea 0x100(%rsp),%rcx # size optimization
1301 vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
1302 vpshufd \$0x55,$xa3,$xa1
1303 vmovdqa $xa0,0x40(%rsp) # ... and offload
1304 vpshufd \$0xaa,$xa3,$xa2
1305 vmovdqa $xa1,0x50(%rsp)
1306 vpshufd \$0xff,$xa3,$xa3
1307 vmovdqa $xa2,0x60(%rsp)
1308 vmovdqa $xa3,0x70(%rsp)
1310 vpshufd \$0x00,$xb3,$xb0
1311 vpshufd \$0x55,$xb3,$xb1
1312 vmovdqa $xb0,0x80-0x100(%rcx)
1313 vpshufd \$0xaa,$xb3,$xb2
1314 vmovdqa $xb1,0x90-0x100(%rcx)
1315 vpshufd \$0xff,$xb3,$xb3
1316 vmovdqa $xb2,0xa0-0x100(%rcx)
1317 vmovdqa $xb3,0xb0-0x100(%rcx)
1319 vpshufd \$0x00,$xt3,$xt0 # "$xc0"
1320 vpshufd \$0x55,$xt3,$xt1 # "$xc1"
1321 vmovdqa $xt0,0xc0-0x100(%rcx)
1322 vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
1323 vmovdqa $xt1,0xd0-0x100(%rcx)
1324 vpshufd \$0xff,$xt3,$xt3 # "$xc3"
1325 vmovdqa $xt2,0xe0-0x100(%rcx)
1326 vmovdqa $xt3,0xf0-0x100(%rcx)
1328 vpshufd \$0x00,$xd3,$xd0
1329 vpshufd \$0x55,$xd3,$xd1
1330 vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
1331 vpshufd \$0xaa,$xd3,$xd2
1332 vmovdqa $xd1,0x110-0x100(%rcx)
1333 vpshufd \$0xff,$xd3,$xd3
1334 vmovdqa $xd2,0x120-0x100(%rcx)
1335 vmovdqa $xd3,0x130-0x100(%rcx)
1341 vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
1342 vmovdqa 0x50(%rsp),$xa1
1343 vmovdqa 0x60(%rsp),$xa2
1344 vmovdqa 0x70(%rsp),$xa3
1345 vmovdqa 0x80-0x100(%rcx),$xb0
1346 vmovdqa 0x90-0x100(%rcx),$xb1
1347 vmovdqa 0xa0-0x100(%rcx),$xb2
1348 vmovdqa 0xb0-0x100(%rcx),$xb3
1349 vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
1350 vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
1351 vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
1352 vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
1353 vmovdqa 0x100-0x100(%rcx),$xd0
1354 vmovdqa 0x110-0x100(%rcx),$xd1
1355 vmovdqa 0x120-0x100(%rcx),$xd2
1356 vmovdqa 0x130-0x100(%rcx),$xd3
1357 vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
1361 vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
1367 foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
1368 foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
1373 vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
1374 vpaddd 0x50(%rsp),$xa1,$xa1
1375 vpaddd 0x60(%rsp),$xa2,$xa2
1376 vpaddd 0x70(%rsp),$xa3,$xa3
1378 vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
1379 vmovdqa $xt3,0x30(%rsp)
1381 vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
1382 vpunpckldq $xa3,$xa2,$xt3
1383 vpunpckhdq $xa1,$xa0,$xa0
1384 vpunpckhdq $xa3,$xa2,$xa2
1385 vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
1386 vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
1387 vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
1388 vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
1390 ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1392 vpaddd 0x80-0x100(%rcx),$xb0,$xb0
1393 vpaddd 0x90-0x100(%rcx),$xb1,$xb1
1394 vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
1395 vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
1397 vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
1398 vmovdqa $xa1,0x10(%rsp)
1399 vmovdqa 0x20(%rsp),$xa0 # "xc2"
1400 vmovdqa 0x30(%rsp),$xa1 # "xc3"
1402 vpunpckldq $xb1,$xb0,$xt2
1403 vpunpckldq $xb3,$xb2,$xt3
1404 vpunpckhdq $xb1,$xb0,$xb0
1405 vpunpckhdq $xb3,$xb2,$xb2
1406 vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
1407 vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
1408 vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
1409 vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
1411 ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1412 my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1414 vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
1415 vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
1416 vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
1417 vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
1419 vpunpckldq $xc1,$xc0,$xt2
1420 vpunpckldq $xc3,$xc2,$xt3
1421 vpunpckhdq $xc1,$xc0,$xc0
1422 vpunpckhdq $xc3,$xc2,$xc2
1423 vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
1424 vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
1425 vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
1426 vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
1428 ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1430 vpaddd 0x100-0x100(%rcx),$xd0,$xd0
1431 vpaddd 0x110-0x100(%rcx),$xd1,$xd1
1432 vpaddd 0x120-0x100(%rcx),$xd2,$xd2
1433 vpaddd 0x130-0x100(%rcx),$xd3,$xd3
1435 vpunpckldq $xd1,$xd0,$xt2
1436 vpunpckldq $xd3,$xd2,$xt3
1437 vpunpckhdq $xd1,$xd0,$xd0
1438 vpunpckhdq $xd3,$xd2,$xd2
1439 vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
1440 vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
1441 vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
1442 vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
1444 ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1445 ($xa0,$xa1)=($xt2,$xt3);
1447 vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
1448 vmovdqa 0x10(%rsp),$xa1
1453 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1454 vpxor 0x10($inp),$xb0,$xb0
1455 vpxor 0x20($inp),$xc0,$xc0
1456 vpxor 0x30($inp),$xd0,$xd0
1457 vpxor 0x40($inp),$xa1,$xa1
1458 vpxor 0x50($inp),$xb1,$xb1
1459 vpxor 0x60($inp),$xc1,$xc1
1460 vpxor 0x70($inp),$xd1,$xd1
1461 lea 0x80($inp),$inp # size optimization
1462 vpxor 0x00($inp),$xa2,$xa2
1463 vpxor 0x10($inp),$xb2,$xb2
1464 vpxor 0x20($inp),$xc2,$xc2
1465 vpxor 0x30($inp),$xd2,$xd2
1466 vpxor 0x40($inp),$xa3,$xa3
1467 vpxor 0x50($inp),$xb3,$xb3
1468 vpxor 0x60($inp),$xc3,$xc3
1469 vpxor 0x70($inp),$xd3,$xd3
1470 lea 0x80($inp),$inp # inp+=64*4
1472 vmovdqu $xa0,0x00($out)
1473 vmovdqu $xb0,0x10($out)
1474 vmovdqu $xc0,0x20($out)
1475 vmovdqu $xd0,0x30($out)
1476 vmovdqu $xa1,0x40($out)
1477 vmovdqu $xb1,0x50($out)
1478 vmovdqu $xc1,0x60($out)
1479 vmovdqu $xd1,0x70($out)
1480 lea 0x80($out),$out # size optimization
1481 vmovdqu $xa2,0x00($out)
1482 vmovdqu $xb2,0x10($out)
1483 vmovdqu $xc2,0x20($out)
1484 vmovdqu $xd2,0x30($out)
1485 vmovdqu $xa3,0x40($out)
1486 vmovdqu $xb3,0x50($out)
1487 vmovdqu $xc3,0x60($out)
1488 vmovdqu $xd3,0x70($out)
1489 lea 0x80($out),$out # out+=64*4
1499 jae .L192_or_more4xop
1501 jae .L128_or_more4xop
1503 jae .L64_or_more4xop
1506 vmovdqa $xa0,0x00(%rsp)
1507 vmovdqa $xb0,0x10(%rsp)
1508 vmovdqa $xc0,0x20(%rsp)
1509 vmovdqa $xd0,0x30(%rsp)
1514 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1515 vpxor 0x10($inp),$xb0,$xb0
1516 vpxor 0x20($inp),$xc0,$xc0
1517 vpxor 0x30($inp),$xd0,$xd0
1518 vmovdqu $xa0,0x00($out)
1519 vmovdqu $xb0,0x10($out)
1520 vmovdqu $xc0,0x20($out)
1521 vmovdqu $xd0,0x30($out)
1524 lea 0x40($inp),$inp # inp+=64*1
1525 vmovdqa $xa1,0x00(%rsp)
1527 vmovdqa $xb1,0x10(%rsp)
1528 lea 0x40($out),$out # out+=64*1
1529 vmovdqa $xc1,0x20(%rsp)
1530 sub \$64,$len # len-=64*1
1531 vmovdqa $xd1,0x30(%rsp)
1536 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1537 vpxor 0x10($inp),$xb0,$xb0
1538 vpxor 0x20($inp),$xc0,$xc0
1539 vpxor 0x30($inp),$xd0,$xd0
1540 vpxor 0x40($inp),$xa1,$xa1
1541 vpxor 0x50($inp),$xb1,$xb1
1542 vpxor 0x60($inp),$xc1,$xc1
1543 vpxor 0x70($inp),$xd1,$xd1
1545 vmovdqu $xa0,0x00($out)
1546 vmovdqu $xb0,0x10($out)
1547 vmovdqu $xc0,0x20($out)
1548 vmovdqu $xd0,0x30($out)
1549 vmovdqu $xa1,0x40($out)
1550 vmovdqu $xb1,0x50($out)
1551 vmovdqu $xc1,0x60($out)
1552 vmovdqu $xd1,0x70($out)
1555 lea 0x80($inp),$inp # inp+=64*2
1556 vmovdqa $xa2,0x00(%rsp)
1558 vmovdqa $xb2,0x10(%rsp)
1559 lea 0x80($out),$out # out+=64*2
1560 vmovdqa $xc2,0x20(%rsp)
1561 sub \$128,$len # len-=64*2
1562 vmovdqa $xd2,0x30(%rsp)
1567 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1568 vpxor 0x10($inp),$xb0,$xb0
1569 vpxor 0x20($inp),$xc0,$xc0
1570 vpxor 0x30($inp),$xd0,$xd0
1571 vpxor 0x40($inp),$xa1,$xa1
1572 vpxor 0x50($inp),$xb1,$xb1
1573 vpxor 0x60($inp),$xc1,$xc1
1574 vpxor 0x70($inp),$xd1,$xd1
1575 lea 0x80($inp),$inp # size optimization
1576 vpxor 0x00($inp),$xa2,$xa2
1577 vpxor 0x10($inp),$xb2,$xb2
1578 vpxor 0x20($inp),$xc2,$xc2
1579 vpxor 0x30($inp),$xd2,$xd2
1581 vmovdqu $xa0,0x00($out)
1582 vmovdqu $xb0,0x10($out)
1583 vmovdqu $xc0,0x20($out)
1584 vmovdqu $xd0,0x30($out)
1585 vmovdqu $xa1,0x40($out)
1586 vmovdqu $xb1,0x50($out)
1587 vmovdqu $xc1,0x60($out)
1588 vmovdqu $xd1,0x70($out)
1589 lea 0x80($out),$out # size optimization
1590 vmovdqu $xa2,0x00($out)
1591 vmovdqu $xb2,0x10($out)
1592 vmovdqu $xc2,0x20($out)
1593 vmovdqu $xd2,0x30($out)
1596 lea 0x40($inp),$inp # inp+=64*3
1597 vmovdqa $xa3,0x00(%rsp)
1599 vmovdqa $xb3,0x10(%rsp)
1600 lea 0x40($out),$out # out+=64*3
1601 vmovdqa $xc3,0x20(%rsp)
1602 sub \$192,$len # len-=64*3
1603 vmovdqa $xd3,0x30(%rsp)
1606 movzb ($inp,%r10),%eax
1607 movzb (%rsp,%r10),%ecx
1610 mov %al,-1($out,%r10)
1617 $code.=<<___ if ($win64);
1618 movaps -0xa8(%r9),%xmm6
1619 movaps -0x98(%r9),%xmm7
1620 movaps -0x88(%r9),%xmm8
1621 movaps -0x78(%r9),%xmm9
1622 movaps -0x68(%r9),%xmm10
1623 movaps -0x58(%r9),%xmm11
1624 movaps -0x48(%r9),%xmm12
1625 movaps -0x38(%r9),%xmm13
1626 movaps -0x28(%r9),%xmm14
1627 movaps -0x18(%r9),%xmm15
1631 .cfi_def_cfa_register %rsp
1635 .size ChaCha20_4xop,.-ChaCha20_4xop
1639 ########################################################################
1642 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1643 $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1644 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1645 "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
1647 sub AVX2_lane_ROUND {
1648 my ($a0,$b0,$c0,$d0)=@_;
1649 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1650 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1651 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1652 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1653 my @x=map("\"$_\"",@xx);
1655 # Consider the order in which variables are addressed by their index:
1660 # 0 4 8 12 < even round
1664 # 0 5 10 15 < odd round
1669 # 'a', 'b' and 'd's are permanently allocated in registers,
1670 # @x[0..7,12..15], while 'c's are maintained in memory. If
1671 # you observe the 'c' column, you'll notice that a pair of 'c's
1672 # stays invariant between rounds. This means that we have to
1673 # reload them only once per round, in the middle. This is why
1674 # you'll see a bunch of 'c' stores and loads in the middle, but
1675 # none at the beginning or end.
1678 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
1679 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1680 "&vpshufb (@x[$d0],@x[$d0],$t1)",
1681 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
1682 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1683 "&vpshufb (@x[$d1],@x[$d1],$t1)",
1685 "&vpaddd ($xc,$xc,@x[$d0])",
1686 "&vpxor (@x[$b0],$xc,@x[$b0])",
1687 "&vpslld ($t0,@x[$b0],12)",
1688 "&vpsrld (@x[$b0],@x[$b0],20)",
1689 "&vpor (@x[$b0],$t0,@x[$b0])",
1690 "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
1691 "&vpaddd ($xc_,$xc_,@x[$d1])",
1692 "&vpxor (@x[$b1],$xc_,@x[$b1])",
1693 "&vpslld ($t1,@x[$b1],12)",
1694 "&vpsrld (@x[$b1],@x[$b1],20)",
1695 "&vpor (@x[$b1],$t1,@x[$b1])",
1697 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
1698 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1699 "&vpshufb (@x[$d0],@x[$d0],$t0)",
1700 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
1701 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1702 "&vpshufb (@x[$d1],@x[$d1],$t0)",
1704 "&vpaddd ($xc,$xc,@x[$d0])",
1705 "&vpxor (@x[$b0],$xc,@x[$b0])",
1706 "&vpslld ($t1,@x[$b0],7)",
1707 "&vpsrld (@x[$b0],@x[$b0],25)",
1708 "&vpor (@x[$b0],$t1,@x[$b0])",
1709 "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
1710 "&vpaddd ($xc_,$xc_,@x[$d1])",
1711 "&vpxor (@x[$b1],$xc_,@x[$b1])",
1712 "&vpslld ($t0,@x[$b1],7)",
1713 "&vpsrld (@x[$b1],@x[$b1],25)",
1714 "&vpor (@x[$b1],$t0,@x[$b1])",
1716 "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
1717 "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
1718 "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
1719 "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
1721 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
1722 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1723 "&vpshufb (@x[$d2],@x[$d2],$t1)",
1724 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
1725 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1726 "&vpshufb (@x[$d3],@x[$d3],$t1)",
1728 "&vpaddd ($xc,$xc,@x[$d2])",
1729 "&vpxor (@x[$b2],$xc,@x[$b2])",
1730 "&vpslld ($t0,@x[$b2],12)",
1731 "&vpsrld (@x[$b2],@x[$b2],20)",
1732 "&vpor (@x[$b2],$t0,@x[$b2])",
1733 "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
1734 "&vpaddd ($xc_,$xc_,@x[$d3])",
1735 "&vpxor (@x[$b3],$xc_,@x[$b3])",
1736 "&vpslld ($t1,@x[$b3],12)",
1737 "&vpsrld (@x[$b3],@x[$b3],20)",
1738 "&vpor (@x[$b3],$t1,@x[$b3])",
1740 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
1741 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1742 "&vpshufb (@x[$d2],@x[$d2],$t0)",
1743 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
1744 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1745 "&vpshufb (@x[$d3],@x[$d3],$t0)",
1747 "&vpaddd ($xc,$xc,@x[$d2])",
1748 "&vpxor (@x[$b2],$xc,@x[$b2])",
1749 "&vpslld ($t1,@x[$b2],7)",
1750 "&vpsrld (@x[$b2],@x[$b2],25)",
1751 "&vpor (@x[$b2],$t1,@x[$b2])",
1752 "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
1753 "&vpaddd ($xc_,$xc_,@x[$d3])",
1754 "&vpxor (@x[$b3],$xc_,@x[$b3])",
1755 "&vpslld ($t0,@x[$b3],7)",
1756 "&vpsrld (@x[$b3],@x[$b3],25)",
1757 "&vpor (@x[$b3],$t0,@x[$b3])"
1761 my $xframe = $win64 ? 0xa8 : 8;
1764 .type ChaCha20_8x,\@function,5
1769 mov %rsp,%r9 # frame register
1770 .cfi_def_cfa_register %r9
1771 sub \$0x280+$xframe,%rsp
1774 $code.=<<___ if ($win64);
1775 movaps %xmm6,-0xa8(%r9)
1776 movaps %xmm7,-0x98(%r9)
1777 movaps %xmm8,-0x88(%r9)
1778 movaps %xmm9,-0x78(%r9)
1779 movaps %xmm10,-0x68(%r9)
1780 movaps %xmm11,-0x58(%r9)
1781 movaps %xmm12,-0x48(%r9)
1782 movaps %xmm13,-0x38(%r9)
1783 movaps %xmm14,-0x28(%r9)
1784 movaps %xmm15,-0x18(%r9)
1790 ################ stack layout
1791 # +0x00 SIMD equivalent of @x[8-11]
1793 # +0x80 constant copy of key[0-2] smashed by lanes
1795 # +0x200 SIMD counters (with nonce smashed by lanes)
1799 vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
1800 vbroadcasti128 ($key),$xb3 # key[1]
1801 vbroadcasti128 16($key),$xt3 # key[2]
1802 vbroadcasti128 ($counter),$xd3 # key[3]
1803 lea 0x100(%rsp),%rcx # size optimization
1804 lea 0x200(%rsp),%rax # size optimization
1805 lea .Lrot16(%rip),%r10
1806 lea .Lrot24(%rip),%r11
1808 vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
1809 vpshufd \$0x55,$xa3,$xa1
1810 vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
1811 vpshufd \$0xaa,$xa3,$xa2
1812 vmovdqa $xa1,0xa0-0x100(%rcx)
1813 vpshufd \$0xff,$xa3,$xa3
1814 vmovdqa $xa2,0xc0-0x100(%rcx)
1815 vmovdqa $xa3,0xe0-0x100(%rcx)
1817 vpshufd \$0x00,$xb3,$xb0
1818 vpshufd \$0x55,$xb3,$xb1
1819 vmovdqa $xb0,0x100-0x100(%rcx)
1820 vpshufd \$0xaa,$xb3,$xb2
1821 vmovdqa $xb1,0x120-0x100(%rcx)
1822 vpshufd \$0xff,$xb3,$xb3
1823 vmovdqa $xb2,0x140-0x100(%rcx)
1824 vmovdqa $xb3,0x160-0x100(%rcx)
1826 vpshufd \$0x00,$xt3,$xt0 # "xc0"
1827 vpshufd \$0x55,$xt3,$xt1 # "xc1"
1828 vmovdqa $xt0,0x180-0x200(%rax)
1829 vpshufd \$0xaa,$xt3,$xt2 # "xc2"
1830 vmovdqa $xt1,0x1a0-0x200(%rax)
1831 vpshufd \$0xff,$xt3,$xt3 # "xc3"
1832 vmovdqa $xt2,0x1c0-0x200(%rax)
1833 vmovdqa $xt3,0x1e0-0x200(%rax)
1835 vpshufd \$0x00,$xd3,$xd0
1836 vpshufd \$0x55,$xd3,$xd1
1837 vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
1838 vpshufd \$0xaa,$xd3,$xd2
1839 vmovdqa $xd1,0x220-0x200(%rax)
1840 vpshufd \$0xff,$xd3,$xd3
1841 vmovdqa $xd2,0x240-0x200(%rax)
1842 vmovdqa $xd3,0x260-0x200(%rax)
1848 vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
1849 vmovdqa 0xa0-0x100(%rcx),$xa1
1850 vmovdqa 0xc0-0x100(%rcx),$xa2
1851 vmovdqa 0xe0-0x100(%rcx),$xa3
1852 vmovdqa 0x100-0x100(%rcx),$xb0
1853 vmovdqa 0x120-0x100(%rcx),$xb1
1854 vmovdqa 0x140-0x100(%rcx),$xb2
1855 vmovdqa 0x160-0x100(%rcx),$xb3
1856 vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
1857 vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
1858 vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
1859 vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
1860 vmovdqa 0x200-0x200(%rax),$xd0
1861 vmovdqa 0x220-0x200(%rax),$xd1
1862 vmovdqa 0x240-0x200(%rax),$xd2
1863 vmovdqa 0x260-0x200(%rax),$xd3
1864 vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
1867 vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
1868 vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
1869 vbroadcasti128 (%r10),$xt3
1870 vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
1877 foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1878 foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
1883 lea 0x200(%rsp),%rax # size optimization
1884 vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
1885 vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
1886 vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
1887 vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
1889 vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
1890 vpunpckldq $xa3,$xa2,$xt3
1891 vpunpckhdq $xa1,$xa0,$xa0
1892 vpunpckhdq $xa3,$xa2,$xa2
1893 vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
1894 vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
1895 vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
1896 vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
1898 ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1900 vpaddd 0x100-0x100(%rcx),$xb0,$xb0
1901 vpaddd 0x120-0x100(%rcx),$xb1,$xb1
1902 vpaddd 0x140-0x100(%rcx),$xb2,$xb2
1903 vpaddd 0x160-0x100(%rcx),$xb3,$xb3
1905 vpunpckldq $xb1,$xb0,$xt2
1906 vpunpckldq $xb3,$xb2,$xt3
1907 vpunpckhdq $xb1,$xb0,$xb0
1908 vpunpckhdq $xb3,$xb2,$xb2
1909 vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
1910 vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
1911 vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
1912 vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
1914 ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1916 vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
1917 vperm2i128 \$0x31,$xb0,$xa0,$xb0
1918 vperm2i128 \$0x20,$xb1,$xa1,$xa0
1919 vperm2i128 \$0x31,$xb1,$xa1,$xb1
1920 vperm2i128 \$0x20,$xb2,$xa2,$xa1
1921 vperm2i128 \$0x31,$xb2,$xa2,$xb2
1922 vperm2i128 \$0x20,$xb3,$xa3,$xa2
1923 vperm2i128 \$0x31,$xb3,$xa3,$xb3
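#
# Each register still interleaves 128-bit halves of two different
# blocks; vperm2i128 with immediate 0x20 gathers the two low halves
# and 0x31 the two high halves, completing the de-interleave begun by
# the dword/qword unpacks.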
1925 ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1926 my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1928 vmovdqa $xa0,0x00(%rsp) # offload $xaN
1929 vmovdqa $xa1,0x20(%rsp)
1930 vmovdqa 0x40(%rsp),$xc2 # $xa0
1931 vmovdqa 0x60(%rsp),$xc3 # $xa1
1933 vpaddd 0x180-0x200(%rax),$xc0,$xc0
1934 vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
1935 vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
1936 vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
1938 vpunpckldq $xc1,$xc0,$xt2
1939 vpunpckldq $xc3,$xc2,$xt3
1940 vpunpckhdq $xc1,$xc0,$xc0
1941 vpunpckhdq $xc3,$xc2,$xc2
1942 vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
1943 vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
1944 vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
1945 vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
1947 ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1949 vpaddd 0x200-0x200(%rax),$xd0,$xd0
1950 vpaddd 0x220-0x200(%rax),$xd1,$xd1
1951 vpaddd 0x240-0x200(%rax),$xd2,$xd2
1952 vpaddd 0x260-0x200(%rax),$xd3,$xd3
1954 vpunpckldq $xd1,$xd0,$xt2
1955 vpunpckldq $xd3,$xd2,$xt3
1956 vpunpckhdq $xd1,$xd0,$xd0
1957 vpunpckhdq $xd3,$xd2,$xd2
1958 vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
1959 vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
1960 vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
1961 vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
1963 ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1965 vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
1966 vperm2i128 \$0x31,$xd0,$xc0,$xd0
1967 vperm2i128 \$0x20,$xd1,$xc1,$xc0
1968 vperm2i128 \$0x31,$xd1,$xc1,$xd1
1969 vperm2i128 \$0x20,$xd2,$xc2,$xc1
1970 vperm2i128 \$0x31,$xd2,$xc2,$xd2
1971 vperm2i128 \$0x20,$xd3,$xc3,$xc2
1972 vperm2i128 \$0x31,$xd3,$xc3,$xd3
1974 ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1975 ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1976 ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1977 ($xa0,$xa1)=($xt2,$xt3);
1979 vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
1980 vmovdqa 0x20(%rsp),$xa1
1985 vpxor 0x00($inp),$xa0,$xa0 # xor with input
1986 vpxor 0x20($inp),$xb0,$xb0
1987 vpxor 0x40($inp),$xc0,$xc0
1988 vpxor 0x60($inp),$xd0,$xd0
1989 lea 0x80($inp),$inp # size optimization
1990 vmovdqu $xa0,0x00($out)
1991 vmovdqu $xb0,0x20($out)
1992 vmovdqu $xc0,0x40($out)
1993 vmovdqu $xd0,0x60($out)
1994 lea 0x80($out),$out # size optimization
1996 vpxor 0x00($inp),$xa1,$xa1
1997 vpxor 0x20($inp),$xb1,$xb1
1998 vpxor 0x40($inp),$xc1,$xc1
1999 vpxor 0x60($inp),$xd1,$xd1
2000 lea 0x80($inp),$inp # size optimization
2001 vmovdqu $xa1,0x00($out)
2002 vmovdqu $xb1,0x20($out)
2003 vmovdqu $xc1,0x40($out)
2004 vmovdqu $xd1,0x60($out)
2005 lea 0x80($out),$out # size optimization
2007 vpxor 0x00($inp),$xa2,$xa2
2008 vpxor 0x20($inp),$xb2,$xb2
2009 vpxor 0x40($inp),$xc2,$xc2
2010 vpxor 0x60($inp),$xd2,$xd2
2011 lea 0x80($inp),$inp # size optimization
2012 vmovdqu $xa2,0x00($out)
2013 vmovdqu $xb2,0x20($out)
2014 vmovdqu $xc2,0x40($out)
2015 vmovdqu $xd2,0x60($out)
2016 lea 0x80($out),$out # size optimization
2018 vpxor 0x00($inp),$xa3,$xa3
2019 vpxor 0x20($inp),$xb3,$xb3
2020 vpxor 0x40($inp),$xc3,$xc3
2021 vpxor 0x60($inp),$xd3,$xd3
2022 lea 0x80($inp),$inp # size optimization
2023 vmovdqu $xa3,0x00($out)
2024 vmovdqu $xb3,0x20($out)
2025 vmovdqu $xc3,0x40($out)
2026 vmovdqu $xd3,0x60($out)
2027 lea 0x80($out),$out # size optimization
2051 vmovdqa $xa0,0x00(%rsp)
2052 vmovdqa $xb0,0x20(%rsp)
2057 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2058 vpxor 0x20($inp),$xb0,$xb0
2059 vmovdqu $xa0,0x00($out)
2060 vmovdqu $xb0,0x20($out)
2063 lea 0x40($inp),$inp # inp+=64*1
2065 vmovdqa $xc0,0x00(%rsp)
2066 lea 0x40($out),$out # out+=64*1
2067 sub \$64,$len # len-=64*1
2068 vmovdqa $xd0,0x20(%rsp)
2073 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2074 vpxor 0x20($inp),$xb0,$xb0
2075 vpxor 0x40($inp),$xc0,$xc0
2076 vpxor 0x60($inp),$xd0,$xd0
2077 vmovdqu $xa0,0x00($out)
2078 vmovdqu $xb0,0x20($out)
2079 vmovdqu $xc0,0x40($out)
2080 vmovdqu $xd0,0x60($out)
2083 lea 0x80($inp),$inp # inp+=64*2
2085 vmovdqa $xa1,0x00(%rsp)
2086 lea 0x80($out),$out # out+=64*2
2087 sub \$128,$len # len-=64*2
2088 vmovdqa $xb1,0x20(%rsp)
2093 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2094 vpxor 0x20($inp),$xb0,$xb0
2095 vpxor 0x40($inp),$xc0,$xc0
2096 vpxor 0x60($inp),$xd0,$xd0
2097 vpxor 0x80($inp),$xa1,$xa1
2098 vpxor 0xa0($inp),$xb1,$xb1
2099 vmovdqu $xa0,0x00($out)
2100 vmovdqu $xb0,0x20($out)
2101 vmovdqu $xc0,0x40($out)
2102 vmovdqu $xd0,0x60($out)
2103 vmovdqu $xa1,0x80($out)
2104 vmovdqu $xb1,0xa0($out)
2107 lea 0xc0($inp),$inp # inp+=64*3
2109 vmovdqa $xc1,0x00(%rsp)
2110 lea 0xc0($out),$out # out+=64*3
2111 sub \$192,$len # len-=64*3
2112 vmovdqa $xd1,0x20(%rsp)
2117 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2118 vpxor 0x20($inp),$xb0,$xb0
2119 vpxor 0x40($inp),$xc0,$xc0
2120 vpxor 0x60($inp),$xd0,$xd0
2121 vpxor 0x80($inp),$xa1,$xa1
2122 vpxor 0xa0($inp),$xb1,$xb1
2123 vpxor 0xc0($inp),$xc1,$xc1
2124 vpxor 0xe0($inp),$xd1,$xd1
2125 vmovdqu $xa0,0x00($out)
2126 vmovdqu $xb0,0x20($out)
2127 vmovdqu $xc0,0x40($out)
2128 vmovdqu $xd0,0x60($out)
2129 vmovdqu $xa1,0x80($out)
2130 vmovdqu $xb1,0xa0($out)
2131 vmovdqu $xc1,0xc0($out)
2132 vmovdqu $xd1,0xe0($out)
2135 lea 0x100($inp),$inp # inp+=64*4
2137 vmovdqa $xa2,0x00(%rsp)
2138 lea 0x100($out),$out # out+=64*4
2139 sub \$256,$len # len-=64*4
2140 vmovdqa $xb2,0x20(%rsp)
2145 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2146 vpxor 0x20($inp),$xb0,$xb0
2147 vpxor 0x40($inp),$xc0,$xc0
2148 vpxor 0x60($inp),$xd0,$xd0
2149 vpxor 0x80($inp),$xa1,$xa1
2150 vpxor 0xa0($inp),$xb1,$xb1
2151 vpxor 0xc0($inp),$xc1,$xc1
2152 vpxor 0xe0($inp),$xd1,$xd1
2153 vpxor 0x100($inp),$xa2,$xa2
2154 vpxor 0x120($inp),$xb2,$xb2
2155 vmovdqu $xa0,0x00($out)
2156 vmovdqu $xb0,0x20($out)
2157 vmovdqu $xc0,0x40($out)
2158 vmovdqu $xd0,0x60($out)
2159 vmovdqu $xa1,0x80($out)
2160 vmovdqu $xb1,0xa0($out)
2161 vmovdqu $xc1,0xc0($out)
2162 vmovdqu $xd1,0xe0($out)
2163 vmovdqu $xa2,0x100($out)
2164 vmovdqu $xb2,0x120($out)
2167 lea 0x140($inp),$inp # inp+=64*5
2169 vmovdqa $xc2,0x00(%rsp)
2170 lea 0x140($out),$out # out+=64*5
2171 sub \$320,$len # len-=64*5
2172 vmovdqa $xd2,0x20(%rsp)
2177 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2178 vpxor 0x20($inp),$xb0,$xb0
2179 vpxor 0x40($inp),$xc0,$xc0
2180 vpxor 0x60($inp),$xd0,$xd0
2181 vpxor 0x80($inp),$xa1,$xa1
2182 vpxor 0xa0($inp),$xb1,$xb1
2183 vpxor 0xc0($inp),$xc1,$xc1
2184 vpxor 0xe0($inp),$xd1,$xd1
2185 vpxor 0x100($inp),$xa2,$xa2
2186 vpxor 0x120($inp),$xb2,$xb2
2187 vpxor 0x140($inp),$xc2,$xc2
2188 vpxor 0x160($inp),$xd2,$xd2
2189 vmovdqu $xa0,0x00($out)
2190 vmovdqu $xb0,0x20($out)
2191 vmovdqu $xc0,0x40($out)
2192 vmovdqu $xd0,0x60($out)
2193 vmovdqu $xa1,0x80($out)
2194 vmovdqu $xb1,0xa0($out)
2195 vmovdqu $xc1,0xc0($out)
2196 vmovdqu $xd1,0xe0($out)
2197 vmovdqu $xa2,0x100($out)
2198 vmovdqu $xb2,0x120($out)
2199 vmovdqu $xc2,0x140($out)
2200 vmovdqu $xd2,0x160($out)
2203 lea 0x180($inp),$inp # inp+=64*6
2205 vmovdqa $xa3,0x00(%rsp)
2206 lea 0x180($out),$out # out+=64*6
2207 sub \$384,$len # len-=64*6
2208 vmovdqa $xb3,0x20(%rsp)
2213 vpxor 0x00($inp),$xa0,$xa0 # xor with input
2214 vpxor 0x20($inp),$xb0,$xb0
2215 vpxor 0x40($inp),$xc0,$xc0
2216 vpxor 0x60($inp),$xd0,$xd0
2217 vpxor 0x80($inp),$xa1,$xa1
2218 vpxor 0xa0($inp),$xb1,$xb1
2219 vpxor 0xc0($inp),$xc1,$xc1
2220 vpxor 0xe0($inp),$xd1,$xd1
2221 vpxor 0x100($inp),$xa2,$xa2
2222 vpxor 0x120($inp),$xb2,$xb2
2223 vpxor 0x140($inp),$xc2,$xc2
2224 vpxor 0x160($inp),$xd2,$xd2
2225 vpxor 0x180($inp),$xa3,$xa3
2226 vpxor 0x1a0($inp),$xb3,$xb3
2227 vmovdqu $xa0,0x00($out)
2228 vmovdqu $xb0,0x20($out)
2229 vmovdqu $xc0,0x40($out)
2230 vmovdqu $xd0,0x60($out)
2231 vmovdqu $xa1,0x80($out)
2232 vmovdqu $xb1,0xa0($out)
2233 vmovdqu $xc1,0xc0($out)
2234 vmovdqu $xd1,0xe0($out)
2235 vmovdqu $xa2,0x100($out)
2236 vmovdqu $xb2,0x120($out)
2237 vmovdqu $xc2,0x140($out)
2238 vmovdqu $xd2,0x160($out)
2239 vmovdqu $xa3,0x180($out)
2240 vmovdqu $xb3,0x1a0($out)
2243 lea 0x1c0($inp),$inp # inp+=64*7
2245 vmovdqa $xc3,0x00(%rsp)
2246 lea 0x1c0($out),$out # out+=64*7
2247 sub \$448,$len # len-=64*7
2248 vmovdqa $xd3,0x20(%rsp)
2251 movzb ($inp,%r10),%eax
2252 movzb (%rsp,%r10),%ecx
2255 mov %al,-1($out,%r10)
2262 $code.=<<___ if ($win64);
2263 movaps -0xa8(%r9),%xmm6
2264 movaps -0x98(%r9),%xmm7
2265 movaps -0x88(%r9),%xmm8
2266 movaps -0x78(%r9),%xmm9
2267 movaps -0x68(%r9),%xmm10
2268 movaps -0x58(%r9),%xmm11
2269 movaps -0x48(%r9),%xmm12
2270 movaps -0x38(%r9),%xmm13
2271 movaps -0x28(%r9),%xmm14
2272 movaps -0x18(%r9),%xmm15
2276 .cfi_def_cfa_register %rsp
2280 .size ChaCha20_8x,.-ChaCha20_8x
2284 ########################################################################
2287 # This one handles shorter inputs...
2289 my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
2290 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
2292 sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round
2310 my $xframe = $win64 ? 32+8 : 8;
2313 .type ChaCha20_avx512,\@function,5
2318 mov %rsp,%r9 # frame pointer
2319 .cfi_def_cfa_register %r9
2323 sub \$64+$xframe,%rsp
2325 $code.=<<___ if ($win64);
2326 movaps %xmm6,-0x28(%r9)
2327 movaps %xmm7,-0x18(%r9)
2331 vbroadcasti32x4 .Lsigma(%rip),$a
2332 vbroadcasti32x4 ($key),$b
2333 vbroadcasti32x4 16($key),$c
2334 vbroadcasti32x4 ($counter),$d
2339 vpaddd .Lzeroz(%rip),$d,$d
2340 vmovdqa32 .Lfourz(%rip),$fourz
2341 mov \$10,$counter # reuse $counter
2350 vpaddd $fourz,$d_,$d
2359 &vpshufd ($c,$c,0b01001110);
2360 &vpshufd ($b,$b,0b00111001);
2361 &vpshufd ($d,$d,0b10010011);
2364 &vpshufd ($c,$c,0b01001110);
2365 &vpshufd ($b,$b,0b10010011);
2366 &vpshufd ($d,$d,0b00111001);
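# Same trick as in the SSSE3 1x path: the b/c/d rows are rotated
# between column and diagonal quarter-rounds. vpshufd shuffles within
# each 128-bit lane, and since .Lzeroz offset the counters by 0..3 per
# lane, one set of zmm registers advances four independent blocks at
# once.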
2369 &jnz (".Loop_avx512");
2380 vpxor 0x00($inp),%x#$a,$t0 # xor with input
2381 vpxor 0x10($inp),%x#$b,$t1
2382 vpxor 0x20($inp),%x#$c,$t2
2383 vpxor 0x30($inp),%x#$d,$t3
2384 lea 0x40($inp),$inp # inp+=64
2386 vmovdqu $t0,0x00($out) # write output
2387 vmovdqu $t1,0x10($out)
2388 vmovdqu $t2,0x20($out)
2389 vmovdqu $t3,0x30($out)
2390 lea 0x40($out),$out # out+=64
2394 vextracti32x4 \$1,$a,$t0
2395 vextracti32x4 \$1,$b,$t1
2396 vextracti32x4 \$1,$c,$t2
2397 vextracti32x4 \$1,$d,$t3
2402 vpxor 0x00($inp),$t0,$t0 # xor with input
2403 vpxor 0x10($inp),$t1,$t1
2404 vpxor 0x20($inp),$t2,$t2
2405 vpxor 0x30($inp),$t3,$t3
2406 lea 0x40($inp),$inp # inp+=64
2408 vmovdqu $t0,0x00($out) # write output
2409 vmovdqu $t1,0x10($out)
2410 vmovdqu $t2,0x20($out)
2411 vmovdqu $t3,0x30($out)
2412 lea 0x40($out),$out # out+=64
2416 vextracti32x4 \$2,$a,$t0
2417 vextracti32x4 \$2,$b,$t1
2418 vextracti32x4 \$2,$c,$t2
2419 vextracti32x4 \$2,$d,$t3
2424 vpxor 0x00($inp),$t0,$t0 # xor with input
2425 vpxor 0x10($inp),$t1,$t1
2426 vpxor 0x20($inp),$t2,$t2
2427 vpxor 0x30($inp),$t3,$t3
2428 lea 0x40($inp),$inp # inp+=64
2430 vmovdqu $t0,0x00($out) # write output
2431 vmovdqu $t1,0x10($out)
2432 vmovdqu $t2,0x20($out)
2433 vmovdqu $t3,0x30($out)
2434 lea 0x40($out),$out # out+=64
2438 vextracti32x4 \$3,$a,$t0
2439 vextracti32x4 \$3,$b,$t1
2440 vextracti32x4 \$3,$c,$t2
2441 vextracti32x4 \$3,$d,$t3
2446 vpxor 0x00($inp),$t0,$t0 # xor with input
2447 vpxor 0x10($inp),$t1,$t1
2448 vpxor 0x20($inp),$t2,$t2
2449 vpxor 0x30($inp),$t3,$t3
2450 lea 0x40($inp),$inp # inp+=64
2452 vmovdqu $t0,0x00($out) # write output
2453 vmovdqu $t1,0x10($out)
2454 vmovdqu $t2,0x20($out)
2455 vmovdqu $t3,0x30($out)
2456 lea 0x40($out),$out # out+=64
2458 jnz .Loop_outer_avx512
2464 vmovdqa %x#$a,0x00(%rsp)
2465 vmovdqa %x#$b,0x10(%rsp)
2466 vmovdqa %x#$c,0x20(%rsp)
2467 vmovdqa %x#$d,0x30(%rsp)
2469 jmp .Loop_tail_avx512
2473 vmovdqa $t0,0x00(%rsp)
2474 vmovdqa $t1,0x10(%rsp)
2475 vmovdqa $t2,0x20(%rsp)
2476 vmovdqa $t3,0x30(%rsp)
2480 movzb ($inp,$counter),%eax
2481 movzb (%rsp,$counter),%ecx
2482 lea 1($counter),$counter
2484 mov %al,-1($out,$counter)
2486 jnz .Loop_tail_avx512
2488 vmovdqa32 $a_,0x00(%rsp)
2493 $code.=<<___ if ($win64);
2494 movaps -0x28(%r9),%xmm6
2495 movaps -0x18(%r9),%xmm7
2499 .cfi_def_cfa_register %rsp
2503 .size ChaCha20_avx512,.-ChaCha20_avx512
2507 # This one handles longer inputs...
2509 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2510 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2511 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2512 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2513 my @key=map("%zmm$_",(16..31));
2514 my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
2516 sub AVX512_lane_ROUND {
2517 my ($a0,$b0,$c0,$d0)=@_;
2518 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
2519 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
2520 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
2521 my @x=map("\"$_\"",@xx);
2524 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
2525 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
2526 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
2527 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
2528 "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
2529 "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
2530 "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
2531 "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
2532 "&vprold (@x[$d0],@x[$d0],16)",
2533 "&vprold (@x[$d1],@x[$d1],16)",
2534 "&vprold (@x[$d2],@x[$d2],16)",
2535 "&vprold (@x[$d3],@x[$d3],16)",
2537 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
2538 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
2539 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
2540 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
2541 "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
2542 "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
2543 "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
2544 "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
2545 "&vprold (@x[$b0],@x[$b0],12)",
2546 "&vprold (@x[$b1],@x[$b1],12)",
2547 "&vprold (@x[$b2],@x[$b2],12)",
2548 "&vprold (@x[$b3],@x[$b3],12)",
2550 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
2551 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
2552 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
2553 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
2554 "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
2555 "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
2556 "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
2557 "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
2558 "&vprold (@x[$d0],@x[$d0],8)",
2559 "&vprold (@x[$d1],@x[$d1],8)",
2560 "&vprold (@x[$d2],@x[$d2],8)",
2561 "&vprold (@x[$d3],@x[$d3],8)",
2563 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
2564 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
2565 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
2566 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
2567 "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
2568 "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
2569 "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
2570 "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
2571 "&vprold (@x[$b0],@x[$b0],7)",
2572 "&vprold (@x[$b1],@x[$b1],7)",
2573 "&vprold (@x[$b2],@x[$b2],7)",
2574 "&vprold (@x[$b3],@x[$b3],7)"
2578 my $xframe = $win64 ? 0xa8 : 8;
2581 .type ChaCha20_16x,\@function,5
2586 mov %rsp,%r9 # frame register
2587 .cfi_def_cfa_register %r9
2588 sub \$64+$xframe,%rsp
2591 $code.=<<___ if ($win64);
2592 movaps %xmm6,-0xa8(%r9)
2593 movaps %xmm7,-0x98(%r9)
2594 movaps %xmm8,-0x88(%r9)
2595 movaps %xmm9,-0x78(%r9)
2596 movaps %xmm10,-0x68(%r9)
2597 movaps %xmm11,-0x58(%r9)
2598 movaps %xmm12,-0x48(%r9)
2599 movaps %xmm13,-0x38(%r9)
2600 movaps %xmm14,-0x28(%r9)
2601 movaps %xmm15,-0x18(%r9)
2607 lea .Lsigma(%rip),%r10
2608 vbroadcasti32x4 (%r10),$xa3 # key[0]
2609 vbroadcasti32x4 ($key),$xb3 # key[1]
2610 vbroadcasti32x4 16($key),$xc3 # key[2]
2611 vbroadcasti32x4 ($counter),$xd3 # key[3]
2613 vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
2614 vpshufd \$0x55,$xa3,$xa1
2615 vpshufd \$0xaa,$xa3,$xa2
2616 vpshufd \$0xff,$xa3,$xa3
2617 vmovdqa64 $xa0,@key[0]
2618 vmovdqa64 $xa1,@key[1]
2619 vmovdqa64 $xa2,@key[2]
2620 vmovdqa64 $xa3,@key[3]
2622 vpshufd \$0x00,$xb3,$xb0
2623 vpshufd \$0x55,$xb3,$xb1
2624 vpshufd \$0xaa,$xb3,$xb2
2625 vpshufd \$0xff,$xb3,$xb3
2626 vmovdqa64 $xb0,@key[4]
2627 vmovdqa64 $xb1,@key[5]
2628 vmovdqa64 $xb2,@key[6]
2629 vmovdqa64 $xb3,@key[7]
2631 vpshufd \$0x00,$xc3,$xc0
2632 vpshufd \$0x55,$xc3,$xc1
2633 vpshufd \$0xaa,$xc3,$xc2
2634 vpshufd \$0xff,$xc3,$xc3
2635 vmovdqa64 $xc0,@key[8]
2636 vmovdqa64 $xc1,@key[9]
2637 vmovdqa64 $xc2,@key[10]
2638 vmovdqa64 $xc3,@key[11]
2640 vpshufd \$0x00,$xd3,$xd0
2641 vpshufd \$0x55,$xd3,$xd1
2642 vpshufd \$0xaa,$xd3,$xd2
2643 vpshufd \$0xff,$xd3,$xd3
2644 vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet
2645 vmovdqa64 $xd0,@key[12]
2646 vmovdqa64 $xd1,@key[13]
2647 vmovdqa64 $xd2,@key[14]
2648 vmovdqa64 $xd3,@key[15]
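	# Each @key register now carries a single input word replicated
	# across all sixteen lanes: vbroadcasti32x4 copied each 128-bit
	# input row into all four chunks of a %zmm register, and the
	# in-lane vpshufd with 0x00/0x55/0xaa/0xff smeared one dword over
	# every lane. Only @key[12] differs per lane, by the 0..15
	# counter increments from .Lincz.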
	vpbroadcastd	0(%r10),$xa0	# reload key
	vpbroadcastd	4(%r10),$xa1
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd	.Lsixteen(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
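# (0,4,8,12) works the columns and (0,5,10,15) the diagonals of the
# 4x4 state, together one ChaCha "double round"; ten such passes make
# up the cipher's twenty rounds.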
	vpaddd	@key[0],$xa0,$xa0	# accumulate key
	vpaddd	@key[1],$xa1,$xa1
	vpaddd	@key[2],$xa2,$xa2
	vpaddd	@key[3],$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2	# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1	# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3	# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0	# "a3"

($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);

	vpaddd	@key[4],$xb0,$xb0
	vpaddd	@key[5],$xb1,$xb1
	vpaddd	@key[6],$xb2,$xb2
	vpaddd	@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1	# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3	# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0	# "b3"

($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);

	vshufi32x4	\$0x44,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xb0,$xa0,$xb0
	vshufi32x4	\$0x44,$xb1,$xa1,$xa0
	vshufi32x4	\$0xee,$xb1,$xa1,$xb1
	vshufi32x4	\$0x44,$xb2,$xa2,$xa1
	vshufi32x4	\$0xee,$xb2,$xa2,$xb2
	vshufi32x4	\$0x44,$xb3,$xa3,$xa2
	vshufi32x4	\$0xee,$xb3,$xa3,$xb3

($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);

	vpaddd	@key[8],$xc0,$xc0
	vpaddd	@key[9],$xc1,$xc1
	vpaddd	@key[10],$xc2,$xc2
	vpaddd	@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1	# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3	# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0	# "c3"

($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);

	vpaddd	@key[12],$xd0,$xd0
	vpaddd	@key[13],$xd1,$xd1
	vpaddd	@key[14],$xd2,$xd2
	vpaddd	@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1	# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3	# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0	# "d3"

($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);

	vshufi32x4	\$0x44,$xd0,$xc0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xd0,$xc0,$xd0
	vshufi32x4	\$0x44,$xd1,$xc1,$xc0
	vshufi32x4	\$0xee,$xd1,$xc1,$xd1
	vshufi32x4	\$0x44,$xd2,$xc2,$xc1
	vshufi32x4	\$0xee,$xd2,$xc2,$xd2
	vshufi32x4	\$0x44,$xd3,$xc3,$xc2
	vshufi32x4	\$0xee,$xd3,$xc3,$xd3

($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);

	vshufi32x4	\$0x88,$xc0,$xa0,$xt0	# "de-interlace" further
	vshufi32x4	\$0xdd,$xc0,$xa0,$xa0
	vshufi32x4	\$0x88,$xd0,$xb0,$xc0
	vshufi32x4	\$0xdd,$xd0,$xb0,$xd0
	vshufi32x4	\$0x88,$xc1,$xa1,$xt1
	vshufi32x4	\$0xdd,$xc1,$xa1,$xa1
	vshufi32x4	\$0x88,$xd1,$xb1,$xc1
	vshufi32x4	\$0xdd,$xd1,$xb1,$xd1
	vshufi32x4	\$0x88,$xc2,$xa2,$xt2
	vshufi32x4	\$0xdd,$xc2,$xa2,$xa2
	vshufi32x4	\$0x88,$xd2,$xb2,$xc2
	vshufi32x4	\$0xdd,$xd2,$xb2,$xd2
	vshufi32x4	\$0x88,$xc3,$xa3,$xt3
	vshufi32x4	\$0xdd,$xc3,$xa3,$xa3
	vshufi32x4	\$0x88,$xd3,$xb3,$xc3
	vshufi32x4	\$0xdd,$xd3,$xb3,$xd3

($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);

($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
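# The unpack/shuffle cascade above amounts to a 16x16 dword transpose:
# the registers go from "one state word across sixteen blocks" to "one
# contiguous 64-byte block per register", in output order, so the
# XOR/store sequence below can run straight through memory.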
	vpxord	0x00($inp),$xa0,$xa0	# xor with input
	vpxord	0x40($inp),$xb0,$xb0
	vpxord	0x80($inp),$xc0,$xc0
	vpxord	0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord	0x100($inp),$xa1,$xa1
	vpxord	0x140($inp),$xb1,$xb1
	vpxord	0x180($inp),$xc1,$xc1
	vpxord	0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord	0x200($inp),$xa2,$xa2
	vpxord	0x240($inp),$xb2,$xb2
	vpxord	0x280($inp),$xc2,$xc2
	vpxord	0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord	0x300($inp),$xa3,$xa3
	vpxord	0x340($inp),$xb3,$xb3
	vpxord	0x380($inp),$xc3,$xc3
	vpxord	0x3c0($inp),$xd3,$xd3
	lea	0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea	0x400($out),$out
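	# Fewer than 0x400 bytes left: the chain below retires one
	# 64-byte block per register, bailing out to .Less_than_64_16x
	# as soon as less than a whole block remains.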
	jb	.Less_than_64_16x
	vpxord	($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)

	vmovdqa32	$xa0,0x00(%rsp)
	lea	($out,$inp),$out

	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	lea	1(%r10),%r10
	xor	%ecx,%eax
	mov	%al,-1($out,%r10)
	dec	$len
	jnz	.Loop_tail16x
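	# wipe the keystream copy off the stack before returning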
	vpxord	$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)
$code.=<<___	if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15

.cfi_def_cfa_register	%rsp

.size	ChaCha20_16x,.-ChaCha20_16x

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
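#
# If context->Rip lies before the function's prologue has pushed
# anything, or at/past its epilogue, the stack is already clean and
# the handler falls through to the common tail; otherwise it recovers
# the saved non-volatile registers from the frame, writes them back
# into the CONTEXT record, and lets RtlVirtualUnwind carry on the
# unwind from there.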
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch

.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
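	# copy the two saved xmm registers (%xmm6-%xmm7) from the frame
	# back into context->Xmm6/7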
	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx		# two xmm registers, two quads each
	.long	0xa548f3fc		# cld; rep movsq
	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

.type	full_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx		# ten xmm registers, two quads each
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x

$code.=<<___	if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop

$code.=<<___	if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x

$code.=<<___	if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x

.LSEH_info_ChaCha20_ctr32:

.LSEH_info_ChaCha20_ssse3:
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.rva	.L4x_body,.L4x_epilogue

$code.=<<___	if ($avx);
.LSEH_info_ChaCha20_4xop:
	.rva	.L4xop_body,.L4xop_epilogue	# HandlerData[]

$code.=<<___	if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.rva	.L8x_body,.L8x_epilogue		# HandlerData[]

$code.=<<___	if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.rva	.Lavx512_body,.Lavx512_epilogue	# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.rva	.L16x_body,.L16x_epilogue	# HandlerData[]

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;		# "down-shift"
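	# i.e. "%x#%ymmN" and "%x#%zmmN" collapse to "%xmmN", selecting
	# the 128-bit form of the wider register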