2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
17 # This module implements Poly1305 hash for x86_64.
25 # Add AVX512F+VL+BW code path.
29 # Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be
# executed even on Knights Landing. The trigger for the modification
# was the observation that AVX512 code paths can negatively affect
# overall Skylake-X system performance. Since we are likely to
# suppress the AVX512F capability flag [at least on Skylake-X], the
# conversion serves as a kind of "investment protection". Note that
# the next *lake processor, Cannon Lake, has an AVX512IFMA code path
# to execute...
37 # Numbers are cycles per processed byte with poly1305_blocks alone,
38 # measured with rdtsc at fixed clock frequency.
#                 IALU/gcc-4.8(*)  AVX(**)  AVX2   AVX-512
#
# Westmere        1.88/+120%       -
# Sandy Bridge    1.39/+140%       1.10
# Haswell         1.14/+175%       1.11     0.65
# Skylake[-X]     1.13/+120%       0.96     0.51   [0.35]
# Silvermont      2.83/+95%        -
# Knights L       3.60/?           1.65     1.10   ?
# Goldmont        1.70/+180%       -
# VIA Nano        1.82/+150%       -
# Sledgehammer    1.38/+160%       -
# Bulldozer       2.30/+130%       0.97
# Ryzen           1.15/+200%       1.08     1.18
# (*) improvement coefficients relative to clang are more modest and
# are ~50% on most processors; in both cases we are comparing to
# __int128 code;
# (**) an SSE2 implementation was attempted, but among non-AVX
# processors it was faster than integer-only code only on older
# Intel P4 and Core processors, by 30-50% (the older the processor,
# the larger the gain), while being slower on contemporary ones,
# for example almost 2x slower on Atom; as the former are naturally
# disappearing, SSE2 is deemed unnecessary;
66 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
68 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
70 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
71 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
72 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
73 die "can't locate x86_64-xlate.pl";
75 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
76 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
77 $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26);
80 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
81 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
82 $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12);
83 $avx += 2 if ($1==2.11 && $2>=8);
86 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
87 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
88 $avx = ($1>=10) + ($1>=12);
91 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
92 $avx = ($2>=3.0) + ($2>3.0);
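
# The probes above distill assembler capability into a single $avx
# level: informally, 1 allows AVX, 2 adds AVX2, 3 adds AVX512F and 4
# adds AVX512IFMA mnemonics. This is an approximate summary; the
# authoritative tests are the version checks themselves, which gate
# the code paths emitted below ($avx, $avx>1, $avx>3).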
95 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
98 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
99 my ($mac,$nonce)=($inp,$len); # *_emit arguments
100 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
101 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
103 sub poly1305_iteration {
104 # input: copy of $r1 in %rax, $h0-$h2, $r0-$r1
105 # output: $h0-$h2 *= $r0-$r1
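#
# For reference, one iteration computes h = h * r (mod 2^130-5), with
# the 130-bit h kept in 2.5 64-bit limbs. A minimal, comment-only
# Perl model of the same step (an illustrative sketch, not part of
# the generated code; Math::BigInt and the helper name are ours):
#
#	use Math::BigInt;
#	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5); # 2^130-5
#	sub iteration_ref {
#		my ($h, $r) = @_;               # Math::BigInt values
#		return $h->copy->bmul($r)->bmod($p);
#	}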
113 mov %rax,$h0 # future $h0
123 mov $h2,$h1 # borrow $h1
127 imulq $s1,$h1 # h2*s1
132 imulq $r0,$h2 # h2*r0
134 mov \$-4,%rax # mask value
137 and $d3,%rax # last reduction step
148 ########################################################################
# The layout of the opaque area is as follows.
151 # unsigned __int64 h[3]; # current hash value base 2^64
152 # unsigned __int64 r[2]; # key value base 2^64
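#
# i.e. h[3] occupies bytes 0-23 of the context and r[2] bytes 24-39,
# which is why the code below loads the hash value from 0($ctx) and
# the key from 24($ctx).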
157 .extern OPENSSL_ia32cap_P
160 .hidden poly1305_init
161 .globl poly1305_blocks
162 .hidden poly1305_blocks
164 .hidden poly1305_emit
166 .type poly1305_init,\@function,3
170 mov %rax,0($ctx) # initialize hash value
177 lea poly1305_blocks(%rip),%r10
178 lea poly1305_emit(%rip),%r11
180 $code.=<<___ if ($avx);
181 mov OPENSSL_ia32cap_P+4(%rip),%r9
182 lea poly1305_blocks_avx(%rip),%rax
183 lea poly1305_emit_avx(%rip),%rcx
184 bt \$`60-32`,%r9 # AVX?
188 $code.=<<___ if ($avx>1);
189 lea poly1305_blocks_avx2(%rip),%rax
190 bt \$`5+32`,%r9 # AVX2?
193 $code.=<<___ if ($avx>3);
194 mov \$`(1<<31|1<<21|1<<16)`,%rax
201 mov \$0x0ffffffc0fffffff,%rax
202 mov \$0x0ffffffc0ffffffc,%rcx
208 $code.=<<___ if ($flavour !~ /elf32/);
212 $code.=<<___ if ($flavour =~ /elf32/);
220 .size poly1305_init,.-poly1305_init
222 .type poly1305_blocks,\@function,4
228 jz .Lno_data # too short
244 mov $len,%r15 # reassign $len
246 mov 24($ctx),$r0 # load r
249 mov 0($ctx),$h0 # load hash value
256 add $r1,$s1 # s1 = r1 + (r1 >> 2)
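# (why this works: 2^130 = 5 (mod 2^130-5), so overflow past bit
# 130 folds back multiplied by 5; the key is clamped so that r1's
# bottom two bits are zero, hence s1 = r1 + (r1>>2) is exactly
# 5*r1/4, the /4 compensating for bit 130 sitting two bits into
# a 64-bit limb)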
261 add 0($inp),$h0 # accumulate input
266 &poly1305_iteration();
272 mov $h0,0($ctx) # store hash value
289 .cfi_adjust_cfa_offset -48
294 .size poly1305_blocks,.-poly1305_blocks
296 .type poly1305_emit,\@function,3
300 mov 0($ctx),%r8 # load hash value
305 add \$5,%r8 # compare to modulus
309 shr \$2,%r10 # did 130-bit value overflow?
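# (the canonical reduction: compute t = h + 5; if t overflows 130
# bits then h >= 2^130-5 and t - 2^130 = h - (2^130-5) is the
# canonical value, so select t, else keep h; the nonce is then
# added modulo 2^128)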
313 add 0($nonce),%rax # accumulate nonce
315 mov %rax,0($mac) # write result
319 .size poly1305_emit,.-poly1305_emit
323 ########################################################################
# The layout of the opaque area is as follows.
326 # unsigned __int32 h[5]; # current hash value base 2^26
327 # unsigned __int32 is_base2_26;
328 # unsigned __int64 r[2]; # key value base 2^64
329 # unsigned __int64 pad;
330 # struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
# where r^n are the base 2^26 digits of powers of the multiplier key.
# There are 5 digits, but the last four are interleaved with their
# multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3,
# 5*r3, r4, 5*r4.
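#
# Each 16-byte element thus holds one such digit for all four stored
# key powers, in the lane order of the struct above (r^2, r^1, r^4,
# r^3), so a single vector load fetches the same digit of every power.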
336 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
337 map("%xmm$_",(0..15));
340 .type __poly1305_block,\@abi-omnipotent
344 &poly1305_iteration();
347 .size __poly1305_block,.-__poly1305_block
349 .type __poly1305_init_avx,\@abi-omnipotent
356 lea 48+64($ctx),$ctx # size optimization
359 call __poly1305_block # r^2
361 mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
367 mov %eax,`16*0+0-64`($ctx)
369 mov %edx,`16*0+4-64`($ctx)
376 mov %eax,`16*1+0-64`($ctx)
377 lea (%rax,%rax,4),%eax # *5
378 mov %edx,`16*1+4-64`($ctx)
379 lea (%rdx,%rdx,4),%edx # *5
380 mov %eax,`16*2+0-64`($ctx)
382 mov %edx,`16*2+4-64`($ctx)
393 mov %eax,`16*3+0-64`($ctx)
394 lea (%rax,%rax,4),%eax # *5
395 mov %edx,`16*3+4-64`($ctx)
396 lea (%rdx,%rdx,4),%edx # *5
397 mov %eax,`16*4+0-64`($ctx)
399 mov %edx,`16*4+4-64`($ctx)
408 mov %eax,`16*5+0-64`($ctx)
409 lea (%rax,%rax,4),%eax # *5
410 mov %edx,`16*5+4-64`($ctx)
411 lea (%rdx,%rdx,4),%edx # *5
412 mov %eax,`16*6+0-64`($ctx)
414 mov %edx,`16*6+4-64`($ctx)
420 mov $d1#d,`16*7+0-64`($ctx)
421 lea ($d1,$d1,4),$d1 # *5
422 mov $d2#d,`16*7+4-64`($ctx)
423 lea ($d2,$d2,4),$d2 # *5
424 mov $d1#d,`16*8+0-64`($ctx)
425 mov $d2#d,`16*8+4-64`($ctx)
428 call __poly1305_block # r^3
430 mov \$0x3ffffff,%eax # save r^3 base 2^26
434 mov %eax,`16*0+12-64`($ctx)
438 mov %edx,`16*1+12-64`($ctx)
439 lea (%rdx,%rdx,4),%edx # *5
441 mov %edx,`16*2+12-64`($ctx)
447 mov %eax,`16*3+12-64`($ctx)
448 lea (%rax,%rax,4),%eax # *5
450 mov %eax,`16*4+12-64`($ctx)
455 mov %edx,`16*5+12-64`($ctx)
456 lea (%rdx,%rdx,4),%edx # *5
458 mov %edx,`16*6+12-64`($ctx)
463 mov $d1#d,`16*7+12-64`($ctx)
464 lea ($d1,$d1,4),$d1 # *5
465 mov $d1#d,`16*8+12-64`($ctx)
468 call __poly1305_block # r^4
470 mov \$0x3ffffff,%eax # save r^4 base 2^26
474 mov %eax,`16*0+8-64`($ctx)
478 mov %edx,`16*1+8-64`($ctx)
479 lea (%rdx,%rdx,4),%edx # *5
481 mov %edx,`16*2+8-64`($ctx)
487 mov %eax,`16*3+8-64`($ctx)
488 lea (%rax,%rax,4),%eax # *5
490 mov %eax,`16*4+8-64`($ctx)
495 mov %edx,`16*5+8-64`($ctx)
496 lea (%rdx,%rdx,4),%edx # *5
498 mov %edx,`16*6+8-64`($ctx)
503 mov $d1#d,`16*7+8-64`($ctx)
504 lea ($d1,$d1,4),$d1 # *5
505 mov $d1#d,`16*8+8-64`($ctx)
507 lea -48-64($ctx),$ctx # size [de-]optimization
509 .size __poly1305_init_avx,.-__poly1305_init_avx
511 .type poly1305_blocks_avx,\@function,4
515 mov 20($ctx),%r8d # is_base2_26
547 mov $len,%r15 # reassign $len
549 mov 0($ctx),$d1 # load hash value
553 mov 24($ctx),$r0 # load r
556 ################################# base 2^26 -> base 2^64
558 and \$`-1*(1<<31)`,$d1
559 mov $d2,$r1 # borrow $r1
561 and \$`-1*(1<<31)`,$d2
575 adc \$0,$h2 # can be partially reduced...
577 mov \$-4,$d2 # ... so reduce
590 add $r1,$s1 # s1 = r1 + (r1 >> 2)
592 add 0($inp),$h0 # accumulate input
597 call __poly1305_block
599 test $padbit,$padbit # if $padbit is zero,
600 jz .Lstore_base2_64_avx # store hash in base 2^64 format
602 ################################# base 2^64 -> base 2^26
609 and \$0x3ffffff,%rax # h[0]
611 and \$0x3ffffff,%rdx # h[1]
615 and \$0x3ffffff,$h0 # h[2]
617 and \$0x3ffffff,$h1 # h[3]
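# (the full split at 26-bit boundaries is, informally, with
# M = 2^26-1:
#	h[0] =  h0        & M
#	h[1] = (h0 >> 26) & M
#	h[2] = (h0 >> 52 | h1 << 12) & M
#	h[3] = (h1 >> 14) & M
#	h[4] =  h1 >> 40  | h2 << 24 )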
621 jz .Lstore_base2_26_avx
631 .Lstore_base2_64_avx:
634 mov $h2,16($ctx) # note that is_base2_26 is zeroed
638 .Lstore_base2_26_avx:
639 mov %rax#d,0($ctx) # store hash value base 2^26
659 .cfi_adjust_cfa_offset -48
661 .Lblocks_avx_epilogue:
682 mov $len,%r15 # reassign $len
684 mov 24($ctx),$r0 # load r
687 mov 0($ctx),$h0 # load hash value
694 add $r1,$s1 # s1 = r1 + (r1 >> 2)
699 add 0($inp),$h0 # accumulate input
705 call __poly1305_block
708 ################################# base 2^64 -> base 2^26
715 and \$0x3ffffff,%rax # h[0]
717 and \$0x3ffffff,%rdx # h[1]
721 and \$0x3ffffff,$h0 # h[2]
723 and \$0x3ffffff,$h1 # h[3]
731 movl \$1,20($ctx) # set is_base2_26
733 call __poly1305_init_avx
752 .cfi_adjust_cfa_offset -48
753 .Lbase2_64_avx_epilogue:
760 vmovd 4*0($ctx),$H0 # load hash value
768 $code.=<<___ if (!$win64);
770 .cfi_def_cfa %r11,0x60
773 $code.=<<___ if ($win64);
776 vmovdqa %xmm6,0x50(%r11)
777 vmovdqa %xmm7,0x60(%r11)
778 vmovdqa %xmm8,0x70(%r11)
779 vmovdqa %xmm9,0x80(%r11)
780 vmovdqa %xmm10,0x90(%r11)
781 vmovdqa %xmm11,0xa0(%r11)
782 vmovdqa %xmm12,0xb0(%r11)
783 vmovdqa %xmm13,0xc0(%r11)
784 vmovdqa %xmm14,0xd0(%r11)
785 vmovdqa %xmm15,0xe0(%r11)
793 vmovdqu `16*3`($ctx),$D4 # preload r0^2
794 lea `16*3+64`($ctx),$ctx # size optimization
795 lea .Lconst(%rip),%rcx
797 ################################################################
799 vmovdqu 16*2($inp),$T0
800 vmovdqu 16*3($inp),$T1
801 vmovdqa 64(%rcx),$MASK # .Lmask26
803 vpsrldq \$6,$T0,$T2 # splat input
805 vpunpckhqdq $T1,$T0,$T4 # 4
806 vpunpcklqdq $T1,$T0,$T0 # 0:1
807 vpunpcklqdq $T3,$T2,$T3 # 2:3
809 vpsrlq \$40,$T4,$T4 # 4
811 vpand $MASK,$T0,$T0 # 0
813 vpand $MASK,$T1,$T1 # 1
815 vpand $MASK,$T2,$T2 # 2
816 vpand $MASK,$T3,$T3 # 3
817 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
821 # expand and copy pre-calculated table to stack
822 vmovdqu `16*1-64`($ctx),$D1
823 vmovdqu `16*2-64`($ctx),$D2
824 vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
825 vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
826 vmovdqa $D3,-0x90(%r11)
827 vmovdqa $D0,0x00(%rsp)
828 vpshufd \$0xEE,$D1,$D4
829 vmovdqu `16*3-64`($ctx),$D0
830 vpshufd \$0x44,$D1,$D1
831 vmovdqa $D4,-0x80(%r11)
832 vmovdqa $D1,0x10(%rsp)
833 vpshufd \$0xEE,$D2,$D3
834 vmovdqu `16*4-64`($ctx),$D1
835 vpshufd \$0x44,$D2,$D2
836 vmovdqa $D3,-0x70(%r11)
837 vmovdqa $D2,0x20(%rsp)
838 vpshufd \$0xEE,$D0,$D4
839 vmovdqu `16*5-64`($ctx),$D2
840 vpshufd \$0x44,$D0,$D0
841 vmovdqa $D4,-0x60(%r11)
842 vmovdqa $D0,0x30(%rsp)
843 vpshufd \$0xEE,$D1,$D3
844 vmovdqu `16*6-64`($ctx),$D0
845 vpshufd \$0x44,$D1,$D1
846 vmovdqa $D3,-0x50(%r11)
847 vmovdqa $D1,0x40(%rsp)
848 vpshufd \$0xEE,$D2,$D4
849 vmovdqu `16*7-64`($ctx),$D1
850 vpshufd \$0x44,$D2,$D2
851 vmovdqa $D4,-0x40(%r11)
852 vmovdqa $D2,0x50(%rsp)
853 vpshufd \$0xEE,$D0,$D3
854 vmovdqu `16*8-64`($ctx),$D2
855 vpshufd \$0x44,$D0,$D0
856 vmovdqa $D3,-0x30(%r11)
857 vmovdqa $D0,0x60(%rsp)
858 vpshufd \$0xEE,$D1,$D4
859 vpshufd \$0x44,$D1,$D1
860 vmovdqa $D4,-0x20(%r11)
861 vmovdqa $D1,0x70(%rsp)
862 vpshufd \$0xEE,$D2,$D3
863 vmovdqa 0x00(%rsp),$D4 # preload r0^2
864 vpshufd \$0x44,$D2,$D2
865 vmovdqa $D3,-0x10(%r11)
866 vmovdqa $D2,0x80(%rsp)
872 ################################################################
873 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
874 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
875 # \___________________/
876 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
877 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
878 # \___________________/ \____________________/
# Note that we start with inp[2:3]*r^2. This is because it
# doesn't depend on the reduction in the previous iteration.
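# In other words, even- and odd-indexed blocks form two Horner-style
# streams evaluated in r^2 and carried in separate SIMD lanes; the
# odd stream picks up one extra multiplication by r at the very end.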
882 ################################################################
883 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
884 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
885 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
886 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
887 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
889 # though note that $Tx and $Hx are "reversed" in this section,
890 # and $D4 is preloaded with r0^2...
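#
# The s-multiples appear because in base 2^26 a partial product that
# lands at or above limb 5 carries weight 2^130 = 5 (mod 2^130-5);
# e.g. h4*r1 has weight 2^(4*26+26) = 2^130 and therefore contributes
# 5*r1*h4 to d0, which is exactly the precomputed s1 = 5*r1.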
892 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
893 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
894 vmovdqa $H2,0x20(%r11) # offload hash
vpmuludq $T2,$D4,$D2 # d2 = h2*r0
896 vmovdqa 0x10(%rsp),$H2 # r1^2
897 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
898 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
900 vmovdqa $H0,0x00(%r11) #
901 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
902 vmovdqa $H1,0x10(%r11) #
903 vpmuludq $T3,$H2,$H1 # h3*r1
904 vpaddq $H0,$D0,$D0 # d0 += h4*s1
905 vpaddq $H1,$D4,$D4 # d4 += h3*r1
906 vmovdqa $H3,0x30(%r11) #
907 vpmuludq $T2,$H2,$H0 # h2*r1
908 vpmuludq $T1,$H2,$H1 # h1*r1
909 vpaddq $H0,$D3,$D3 # d3 += h2*r1
910 vmovdqa 0x30(%rsp),$H3 # r2^2
911 vpaddq $H1,$D2,$D2 # d2 += h1*r1
912 vmovdqa $H4,0x40(%r11) #
913 vpmuludq $T0,$H2,$H2 # h0*r1
914 vpmuludq $T2,$H3,$H0 # h2*r2
915 vpaddq $H2,$D1,$D1 # d1 += h0*r1
917 vmovdqa 0x40(%rsp),$H4 # s2^2
918 vpaddq $H0,$D4,$D4 # d4 += h2*r2
919 vpmuludq $T1,$H3,$H1 # h1*r2
920 vpmuludq $T0,$H3,$H3 # h0*r2
921 vpaddq $H1,$D3,$D3 # d3 += h1*r2
922 vmovdqa 0x50(%rsp),$H2 # r3^2
923 vpaddq $H3,$D2,$D2 # d2 += h0*r2
924 vpmuludq $T4,$H4,$H0 # h4*s2
925 vpmuludq $T3,$H4,$H4 # h3*s2
926 vpaddq $H0,$D1,$D1 # d1 += h4*s2
927 vmovdqa 0x60(%rsp),$H3 # s3^2
928 vpaddq $H4,$D0,$D0 # d0 += h3*s2
930 vmovdqa 0x80(%rsp),$H4 # s4^2
931 vpmuludq $T1,$H2,$H1 # h1*r3
932 vpmuludq $T0,$H2,$H2 # h0*r3
933 vpaddq $H1,$D4,$D4 # d4 += h1*r3
934 vpaddq $H2,$D3,$D3 # d3 += h0*r3
935 vpmuludq $T4,$H3,$H0 # h4*s3
936 vpmuludq $T3,$H3,$H1 # h3*s3
937 vpaddq $H0,$D2,$D2 # d2 += h4*s3
938 vmovdqu 16*0($inp),$H0 # load input
939 vpaddq $H1,$D1,$D1 # d1 += h3*s3
940 vpmuludq $T2,$H3,$H3 # h2*s3
941 vpmuludq $T2,$H4,$T2 # h2*s4
942 vpaddq $H3,$D0,$D0 # d0 += h2*s3
944 vmovdqu 16*1($inp),$H1 #
945 vpaddq $T2,$D1,$D1 # d1 += h2*s4
946 vpmuludq $T3,$H4,$T3 # h3*s4
947 vpmuludq $T4,$H4,$T4 # h4*s4
948 vpsrldq \$6,$H0,$H2 # splat input
949 vpaddq $T3,$D2,$D2 # d2 += h3*s4
950 vpaddq $T4,$D3,$D3 # d3 += h4*s4
951 vpsrldq \$6,$H1,$H3 #
952 vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
953 vpmuludq $T1,$H4,$T0 # h1*s4
954 vpunpckhqdq $H1,$H0,$H4 # 4
955 vpaddq $T4,$D4,$D4 # d4 += h0*r4
956 vmovdqa -0x90(%r11),$T4 # r0^4
957 vpaddq $T0,$D0,$D0 # d0 += h1*s4
959 vpunpcklqdq $H1,$H0,$H0 # 0:1
960 vpunpcklqdq $H3,$H2,$H3 # 2:3
962 #vpsrlq \$40,$H4,$H4 # 4
963 vpsrldq \$`40/8`,$H4,$H4 # 4
965 vpand $MASK,$H0,$H0 # 0
967 vpand $MASK,$H1,$H1 # 1
968 vpand 0(%rcx),$H4,$H4 # .Lmask24
970 vpand $MASK,$H2,$H2 # 2
971 vpand $MASK,$H3,$H3 # 3
972 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
974 vpaddq 0x00(%r11),$H0,$H0 # add hash value
975 vpaddq 0x10(%r11),$H1,$H1
976 vpaddq 0x20(%r11),$H2,$H2
977 vpaddq 0x30(%r11),$H3,$H3
978 vpaddq 0x40(%r11),$H4,$H4
985 ################################################################
986 # Now we accumulate (inp[0:1]+hash)*r^4
987 ################################################################
988 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
989 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
990 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
991 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
992 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
994 vpmuludq $H0,$T4,$T0 # h0*r0
995 vpmuludq $H1,$T4,$T1 # h1*r0
998 vmovdqa -0x80(%r11),$T2 # r1^4
999 vpmuludq $H2,$T4,$T0 # h2*r0
1000 vpmuludq $H3,$T4,$T1 # h3*r0
1003 vpmuludq $H4,$T4,$T4 # h4*r0
1004 vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
1007 vpaddq $T0,$D0,$D0 # d0 += h4*s1
1008 vpmuludq $H2,$T2,$T1 # h2*r1
1009 vpmuludq $H3,$T2,$T0 # h3*r1
1010 vpaddq $T1,$D3,$D3 # d3 += h2*r1
1011 vmovdqa -0x60(%r11),$T3 # r2^4
1012 vpaddq $T0,$D4,$D4 # d4 += h3*r1
1013 vpmuludq $H1,$T2,$T1 # h1*r1
1014 vpmuludq $H0,$T2,$T2 # h0*r1
1015 vpaddq $T1,$D2,$D2 # d2 += h1*r1
1016 vpaddq $T2,$D1,$D1 # d1 += h0*r1
1018 vmovdqa -0x50(%r11),$T4 # s2^4
1019 vpmuludq $H2,$T3,$T0 # h2*r2
1020 vpmuludq $H1,$T3,$T1 # h1*r2
1021 vpaddq $T0,$D4,$D4 # d4 += h2*r2
1022 vpaddq $T1,$D3,$D3 # d3 += h1*r2
1023 vmovdqa -0x40(%r11),$T2 # r3^4
1024 vpmuludq $H0,$T3,$T3 # h0*r2
1025 vpmuludq $H4,$T4,$T0 # h4*s2
1026 vpaddq $T3,$D2,$D2 # d2 += h0*r2
1027 vpaddq $T0,$D1,$D1 # d1 += h4*s2
1028 vmovdqa -0x30(%r11),$T3 # s3^4
1029 vpmuludq $H3,$T4,$T4 # h3*s2
1030 vpmuludq $H1,$T2,$T1 # h1*r3
1031 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1033 vmovdqa -0x10(%r11),$T4 # s4^4
1034 vpaddq $T1,$D4,$D4 # d4 += h1*r3
1035 vpmuludq $H0,$T2,$T2 # h0*r3
1036 vpmuludq $H4,$T3,$T0 # h4*s3
1037 vpaddq $T2,$D3,$D3 # d3 += h0*r3
1038 vpaddq $T0,$D2,$D2 # d2 += h4*s3
1039 vmovdqu 16*2($inp),$T0 # load input
1040 vpmuludq $H3,$T3,$T2 # h3*s3
1041 vpmuludq $H2,$T3,$T3 # h2*s3
1042 vpaddq $T2,$D1,$D1 # d1 += h3*s3
1043 vmovdqu 16*3($inp),$T1 #
1044 vpaddq $T3,$D0,$D0 # d0 += h2*s3
1046 vpmuludq $H2,$T4,$H2 # h2*s4
1047 vpmuludq $H3,$T4,$H3 # h3*s4
1048 vpsrldq \$6,$T0,$T2 # splat input
1049 vpaddq $H2,$D1,$D1 # d1 += h2*s4
1050 vpmuludq $H4,$T4,$H4 # h4*s4
1051 vpsrldq \$6,$T1,$T3 #
1052 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1053 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1054 vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
1055 vpmuludq $H1,$T4,$H0
1056 vpunpckhqdq $T1,$T0,$T4 # 4
1057 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1058 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1060 vpunpcklqdq $T1,$T0,$T0 # 0:1
1061 vpunpcklqdq $T3,$T2,$T3 # 2:3
1063 #vpsrlq \$40,$T4,$T4 # 4
1064 vpsrldq \$`40/8`,$T4,$T4 # 4
1066 vmovdqa 0x00(%rsp),$D4 # preload r0^2
1067 vpand $MASK,$T0,$T0 # 0
1069 vpand $MASK,$T1,$T1 # 1
1070 vpand 0(%rcx),$T4,$T4 # .Lmask24
1072 vpand $MASK,$T2,$T2 # 2
1073 vpand $MASK,$T3,$T3 # 3
1074 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1076 ################################################################
1077 # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
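# (informally: each step takes c = d[i]>>26, masks d[i] to 26 bits
# and adds c into the next limb, with the carry out of limb 4 folded
# into limb 0 multiplied by 5; carries are only propagated far enough
# to keep limbs from overflowing, not to produce a canonical value)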
1082 vpaddq $D3,$H4,$H4 # h3 -> h4
1086 vpaddq $D0,$D1,$H1 # h0 -> h1
1093 vpaddq $D1,$H2,$H2 # h1 -> h2
1097 vpaddq $D0,$H0,$H0 # h4 -> h0
1101 vpaddq $D2,$H3,$H3 # h2 -> h3
1105 vpaddq $D0,$H1,$H1 # h0 -> h1
1109 vpaddq $D3,$H4,$H4 # h3 -> h4
1114 ################################################################
1115 # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1117 vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
1128 vmovdqa $H2,0x20(%r11)
1129 vmovdqa $H0,0x00(%r11)
1130 vmovdqa $H1,0x10(%r11)
1131 vmovdqa $H3,0x30(%r11)
1132 vmovdqa $H4,0x40(%r11)
1134 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1135 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1136 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1137 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1138 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1140 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
1141 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
1142 vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
1143 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
1144 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
1145 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
1147 vpmuludq $T3,$H2,$H0 # h3*r1
1148 vpaddq $H0,$D4,$D4 # d4 += h3*r1
1149 vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
1150 vpmuludq $T2,$H2,$H1 # h2*r1
1151 vpaddq $H1,$D3,$D3 # d3 += h2*r1
1152 vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
1153 vpmuludq $T1,$H2,$H0 # h1*r1
1154 vpaddq $H0,$D2,$D2 # d2 += h1*r1
1155 vpmuludq $T0,$H2,$H2 # h0*r1
1156 vpaddq $H2,$D1,$D1 # d1 += h0*r1
1157 vpmuludq $T4,$H3,$H3 # h4*s1
1158 vpaddq $H3,$D0,$D0 # d0 += h4*s1
1160 vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
1161 vpmuludq $T2,$H4,$H1 # h2*r2
1162 vpaddq $H1,$D4,$D4 # d4 += h2*r2
1163 vpmuludq $T1,$H4,$H0 # h1*r2
1164 vpaddq $H0,$D3,$D3 # d3 += h1*r2
1165 vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
1166 vpmuludq $T0,$H4,$H4 # h0*r2
1167 vpaddq $H4,$D2,$D2 # d2 += h0*r2
1168 vpmuludq $T4,$H2,$H1 # h4*s2
1169 vpaddq $H1,$D1,$D1 # d1 += h4*s2
1170 vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
1171 vpmuludq $T3,$H2,$H2 # h3*s2
1172 vpaddq $H2,$D0,$D0 # d0 += h3*s2
1174 vpmuludq $T1,$H3,$H0 # h1*r3
1175 vpaddq $H0,$D4,$D4 # d4 += h1*r3
1176 vpmuludq $T0,$H3,$H3 # h0*r3
1177 vpaddq $H3,$D3,$D3 # d3 += h0*r3
1178 vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
1179 vpmuludq $T4,$H4,$H1 # h4*s3
1180 vpaddq $H1,$D2,$D2 # d2 += h4*s3
1181 vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
1182 vpmuludq $T3,$H4,$H0 # h3*s3
1183 vpaddq $H0,$D1,$D1 # d1 += h3*s3
1184 vpmuludq $T2,$H4,$H4 # h2*s3
1185 vpaddq $H4,$D0,$D0 # d0 += h2*s3
1187 vpmuludq $T0,$H2,$H2 # h0*r4
1188 vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
1189 vpmuludq $T4,$H3,$H1 # h4*s4
1190 vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
1191 vpmuludq $T3,$H3,$H0 # h3*s4
1192 vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
1193 vpmuludq $T2,$H3,$H1 # h2*s4
1194 vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
1195 vpmuludq $T1,$H3,$H3 # h1*s4
1196 vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4
1200 vmovdqu 16*0($inp),$H0 # load input
1201 vmovdqu 16*1($inp),$H1
1203 vpsrldq \$6,$H0,$H2 # splat input
1205 vpunpckhqdq $H1,$H0,$H4 # 4
1206 vpunpcklqdq $H1,$H0,$H0 # 0:1
1207 vpunpcklqdq $H3,$H2,$H3 # 2:3
1209 vpsrlq \$40,$H4,$H4 # 4
1211 vpand $MASK,$H0,$H0 # 0
1213 vpand $MASK,$H1,$H1 # 1
1215 vpand $MASK,$H2,$H2 # 2
1216 vpand $MASK,$H3,$H3 # 3
1217 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
1219 vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
1220 vpaddq 0x00(%r11),$H0,$H0
1221 vpaddq 0x10(%r11),$H1,$H1
1222 vpaddq 0x20(%r11),$H2,$H2
1223 vpaddq 0x30(%r11),$H3,$H3
1224 vpaddq 0x40(%r11),$H4,$H4
1226 ################################################################
1227 # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1229 vpmuludq $H0,$T4,$T0 # h0*r0
1230 vpaddq $T0,$D0,$D0 # d0 += h0*r0
1231 vpmuludq $H1,$T4,$T1 # h1*r0
1232 vpaddq $T1,$D1,$D1 # d1 += h1*r0
1233 vpmuludq $H2,$T4,$T0 # h2*r0
1234 vpaddq $T0,$D2,$D2 # d2 += h2*r0
1235 vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
1236 vpmuludq $H3,$T4,$T1 # h3*r0
1237 vpaddq $T1,$D3,$D3 # d3 += h3*r0
1238 vpmuludq $H4,$T4,$T4 # h4*r0
1239 vpaddq $T4,$D4,$D4 # d4 += h4*r0
1241 vpmuludq $H3,$T2,$T0 # h3*r1
1242 vpaddq $T0,$D4,$D4 # d4 += h3*r1
1243 vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
1244 vpmuludq $H2,$T2,$T1 # h2*r1
1245 vpaddq $T1,$D3,$D3 # d3 += h2*r1
1246 vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
1247 vpmuludq $H1,$T2,$T0 # h1*r1
1248 vpaddq $T0,$D2,$D2 # d2 += h1*r1
1249 vpmuludq $H0,$T2,$T2 # h0*r1
1250 vpaddq $T2,$D1,$D1 # d1 += h0*r1
1251 vpmuludq $H4,$T3,$T3 # h4*s1
1252 vpaddq $T3,$D0,$D0 # d0 += h4*s1
1254 vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
1255 vpmuludq $H2,$T4,$T1 # h2*r2
1256 vpaddq $T1,$D4,$D4 # d4 += h2*r2
1257 vpmuludq $H1,$T4,$T0 # h1*r2
1258 vpaddq $T0,$D3,$D3 # d3 += h1*r2
1259 vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
1260 vpmuludq $H0,$T4,$T4 # h0*r2
1261 vpaddq $T4,$D2,$D2 # d2 += h0*r2
1262 vpmuludq $H4,$T2,$T1 # h4*s2
1263 vpaddq $T1,$D1,$D1 # d1 += h4*s2
1264 vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
1265 vpmuludq $H3,$T2,$T2 # h3*s2
1266 vpaddq $T2,$D0,$D0 # d0 += h3*s2
1268 vpmuludq $H1,$T3,$T0 # h1*r3
1269 vpaddq $T0,$D4,$D4 # d4 += h1*r3
1270 vpmuludq $H0,$T3,$T3 # h0*r3
1271 vpaddq $T3,$D3,$D3 # d3 += h0*r3
1272 vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
1273 vpmuludq $H4,$T4,$T1 # h4*s3
1274 vpaddq $T1,$D2,$D2 # d2 += h4*s3
1275 vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
1276 vpmuludq $H3,$T4,$T0 # h3*s3
1277 vpaddq $T0,$D1,$D1 # d1 += h3*s3
1278 vpmuludq $H2,$T4,$T4 # h2*s3
1279 vpaddq $T4,$D0,$D0 # d0 += h2*s3
1281 vpmuludq $H0,$T2,$T2 # h0*r4
1282 vpaddq $T2,$D4,$D4 # d4 += h0*r4
1283 vpmuludq $H4,$T3,$T1 # h4*s4
1284 vpaddq $T1,$D3,$D3 # d3 += h4*s4
1285 vpmuludq $H3,$T3,$T0 # h3*s4
1286 vpaddq $T0,$D2,$D2 # d2 += h3*s4
1287 vpmuludq $H2,$T3,$T1 # h2*s4
1288 vpaddq $T1,$D1,$D1 # d1 += h2*s4
1289 vpmuludq $H1,$T3,$T3 # h1*s4
1290 vpaddq $T3,$D0,$D0 # d0 += h1*s4
1293 ################################################################
1294 # horizontal addition
1307 ################################################################
1312 vpaddq $H3,$D4,$D4 # h3 -> h4
1316 vpaddq $H0,$D1,$D1 # h0 -> h1
1323 vpaddq $H1,$D2,$D2 # h1 -> h2
1327 vpaddq $H4,$D0,$D0 # h4 -> h0
1331 vpaddq $H2,$D3,$D3 # h2 -> h3
1335 vpaddq $H0,$D1,$D1 # h0 -> h1
1339 vpaddq $H3,$D4,$D4 # h3 -> h4
1341 vmovd $D0,`4*0-48-64`($ctx) # save partially reduced
1342 vmovd $D1,`4*1-48-64`($ctx)
1343 vmovd $D2,`4*2-48-64`($ctx)
1344 vmovd $D3,`4*3-48-64`($ctx)
1345 vmovd $D4,`4*4-48-64`($ctx)
1347 $code.=<<___ if ($win64);
1348 vmovdqa 0x50(%r11),%xmm6
1349 vmovdqa 0x60(%r11),%xmm7
1350 vmovdqa 0x70(%r11),%xmm8
1351 vmovdqa 0x80(%r11),%xmm9
1352 vmovdqa 0x90(%r11),%xmm10
1353 vmovdqa 0xa0(%r11),%xmm11
1354 vmovdqa 0xb0(%r11),%xmm12
1355 vmovdqa 0xc0(%r11),%xmm13
1356 vmovdqa 0xd0(%r11),%xmm14
1357 vmovdqa 0xe0(%r11),%xmm15
1361 $code.=<<___ if (!$win64);
1369 .size poly1305_blocks_avx,.-poly1305_blocks_avx
1371 .type poly1305_emit_avx,\@function,3
1374 cmpl \$0,20($ctx) # is_base2_26?
1377 mov 0($ctx),%eax # load hash value base 2^26
1383 shl \$26,%rcx # base 2^26 -> base 2^64
1399 mov %r10,%rax # could be partially reduced, so reduce
1410 add \$5,%r8 # compare to modulus
1414 shr \$2,%r10 # did 130-bit value overflow?
1418 add 0($nonce),%rax # accumulate nonce
1420 mov %rax,0($mac) # write result
1424 .size poly1305_emit_avx,.-poly1305_emit_avx
1428 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1429 map("%ymm$_",(0..15));
1433 .type poly1305_blocks_avx2,\@function,4
1435 poly1305_blocks_avx2:
1437 mov 20($ctx),%r8d # is_base2_26
1469 mov $len,%r15 # reassign $len
1471 mov 0($ctx),$d1 # load hash value
1475 mov 24($ctx),$r0 # load r
1478 ################################# base 2^26 -> base 2^64
1480 and \$`-1*(1<<31)`,$d1
1481 mov $d2,$r1 # borrow $r1
1483 and \$`-1*(1<<31)`,$d2
1497 adc \$0,$h2 # can be partially reduced...
1499 mov \$-4,$d2 # ... so reduce
1512 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1514 .Lbase2_26_pre_avx2:
1515 add 0($inp),$h0 # accumulate input
1521 call __poly1305_block
1525 jnz .Lbase2_26_pre_avx2
1527 test $padbit,$padbit # if $padbit is zero,
1528 jz .Lstore_base2_64_avx2 # store hash in base 2^64 format
1530 ################################# base 2^64 -> base 2^26
1537 and \$0x3ffffff,%rax # h[0]
1539 and \$0x3ffffff,%rdx # h[1]
1543 and \$0x3ffffff,$h0 # h[2]
1545 and \$0x3ffffff,$h1 # h[3]
1549 jz .Lstore_base2_26_avx2
1559 .Lstore_base2_64_avx2:
1562 mov $h2,16($ctx) # note that is_base2_26 is zeroed
1566 .Lstore_base2_26_avx2:
1567 mov %rax#d,0($ctx) # store hash value base 2^26
1587 .cfi_adjust_cfa_offset -48
1589 .Lblocks_avx2_epilogue:
1608 .Lbase2_64_avx2_body:
1610 mov $len,%r15 # reassign $len
1612 mov 24($ctx),$r0 # load r
1615 mov 0($ctx),$h0 # load hash value
1622 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1627 .Lbase2_64_pre_avx2:
1628 add 0($inp),$h0 # accumulate input
1634 call __poly1305_block
1638 jnz .Lbase2_64_pre_avx2
1641 ################################# base 2^64 -> base 2^26
1648 and \$0x3ffffff,%rax # h[0]
1650 and \$0x3ffffff,%rdx # h[1]
1654 and \$0x3ffffff,$h0 # h[2]
1656 and \$0x3ffffff,$h1 # h[3]
1664 movl \$1,20($ctx) # set is_base2_26
1666 call __poly1305_init_avx
1669 mov %r15,$len # restore $len
1670 mov OPENSSL_ia32cap_P+8(%rip),%r10d
1671 mov \$`(1<<31|1<<30|1<<16)`,%r11d
1687 .cfi_adjust_cfa_offset -48
1688 .Lbase2_64_avx2_epilogue:
1695 mov OPENSSL_ia32cap_P+8(%rip),%r10d
1696 vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
1697 vmovd 4*1($ctx),%x#$H1
1698 vmovd 4*2($ctx),%x#$H2
1699 vmovd 4*3($ctx),%x#$H3
1700 vmovd 4*4($ctx),%x#$H4
1704 $code.=<<___ if ($avx>2);
1708 test \$`1<<16`,%r10d # check for AVX512F
1712 $code.=<<___ if (!$win64);
1714 .cfi_def_cfa %r11,16
1717 $code.=<<___ if ($win64);
1718 lea -0xf8(%rsp),%r11
1720 vmovdqa %xmm6,0x50(%r11)
1721 vmovdqa %xmm7,0x60(%r11)
1722 vmovdqa %xmm8,0x70(%r11)
1723 vmovdqa %xmm9,0x80(%r11)
1724 vmovdqa %xmm10,0x90(%r11)
1725 vmovdqa %xmm11,0xa0(%r11)
1726 vmovdqa %xmm12,0xb0(%r11)
1727 vmovdqa %xmm13,0xc0(%r11)
1728 vmovdqa %xmm14,0xd0(%r11)
1729 vmovdqa %xmm15,0xe0(%r11)
1733 lea .Lconst(%rip),%rcx
1734 lea 48+64($ctx),$ctx # size optimization
1735 vmovdqa 96(%rcx),$T0 # .Lpermd_avx2
1737 # expand and copy pre-calculated table to stack
1738 vmovdqu `16*0-64`($ctx),%x#$T2
1740 vmovdqu `16*1-64`($ctx),%x#$T3
1741 vmovdqu `16*2-64`($ctx),%x#$T4
1742 vmovdqu `16*3-64`($ctx),%x#$D0
1743 vmovdqu `16*4-64`($ctx),%x#$D1
1744 vmovdqu `16*5-64`($ctx),%x#$D2
1745 lea 0x90(%rsp),%rax # size optimization
1746 vmovdqu `16*6-64`($ctx),%x#$D3
1747 vpermd $T2,$T0,$T2 # 00003412 -> 14243444
1748 vmovdqu `16*7-64`($ctx),%x#$D4
1750 vmovdqu `16*8-64`($ctx),%x#$MASK
1752 vmovdqa $T2,0x00(%rsp)
1754 vmovdqa $T3,0x20-0x90(%rax)
1756 vmovdqa $T4,0x40-0x90(%rax)
1758 vmovdqa $D0,0x60-0x90(%rax)
1760 vmovdqa $D1,0x80-0x90(%rax)
1762 vmovdqa $D2,0xa0-0x90(%rax)
1763 vpermd $MASK,$T0,$MASK
1764 vmovdqa $D3,0xc0-0x90(%rax)
1765 vmovdqa $D4,0xe0-0x90(%rax)
1766 vmovdqa $MASK,0x100-0x90(%rax)
1767 vmovdqa 64(%rcx),$MASK # .Lmask26
1769 ################################################################
1771 vmovdqu 16*0($inp),%x#$T0
1772 vmovdqu 16*1($inp),%x#$T1
1773 vinserti128 \$1,16*2($inp),$T0,$T0
1774 vinserti128 \$1,16*3($inp),$T1,$T1
1777 vpsrldq \$6,$T0,$T2 # splat input
1779 vpunpckhqdq $T1,$T0,$T4 # 4
1780 vpunpcklqdq $T3,$T2,$T2 # 2:3
1781 vpunpcklqdq $T1,$T0,$T0 # 0:1
1786 vpsrlq \$40,$T4,$T4 # 4
1787 vpand $MASK,$T2,$T2 # 2
1788 vpand $MASK,$T0,$T0 # 0
1789 vpand $MASK,$T1,$T1 # 1
1790 vpand $MASK,$T3,$T3 # 3
1791 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1793 vpaddq $H2,$T2,$H2 # accumulate input
1800 ################################################################
1801 # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1802 # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1803 # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1804 # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1805 # \________/\__________/
1806 ################################################################
1807 #vpaddq $H2,$T2,$H2 # accumulate input
1809 vmovdqa `32*0`(%rsp),$T0 # r0^4
1811 vmovdqa `32*1`(%rsp),$T1 # r1^4
1813 vmovdqa `32*3`(%rsp),$T2 # r2^4
1815 vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
1816 vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
1818 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1819 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1820 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1821 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1822 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
# however, as h2 is "chronologically" the first one available, the
# corresponding operations are pulled up, so it's
1827 # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
1828 # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
1829 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1830 # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
1831 # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
1833 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1834 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1835 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1836 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1837 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1839 vpmuludq $H0,$T1,$T4 # h0*r1
1840 vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
1841 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1842 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1843 vpmuludq $H3,$T1,$T4 # h3*r1
1844 vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
1845 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1846 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1847 vmovdqa `32*4-0x90`(%rax),$T1 # s2
1849 vpmuludq $H0,$T0,$T4 # h0*r0
1850 vpmuludq $H1,$T0,$H2 # h1*r0
1851 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1852 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1853 vpmuludq $H3,$T0,$T4 # h3*r0
1854 vpmuludq $H4,$T0,$H2 # h4*r0
1855 vmovdqu 16*0($inp),%x#$T0 # load input
1856 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1857 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1858 vinserti128 \$1,16*2($inp),$T0,$T0
1860 vpmuludq $H3,$T1,$T4 # h3*s2
1861 vpmuludq $H4,$T1,$H2 # h4*s2
1862 vmovdqu 16*1($inp),%x#$T1
1863 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1864 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1865 vmovdqa `32*5-0x90`(%rax),$H2 # r3
1866 vpmuludq $H1,$T2,$T4 # h1*r2
1867 vpmuludq $H0,$T2,$T2 # h0*r2
1868 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1869 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1870 vinserti128 \$1,16*3($inp),$T1,$T1
1873 vpmuludq $H1,$H2,$T4 # h1*r3
1874 vpmuludq $H0,$H2,$H2 # h0*r3
1875 vpsrldq \$6,$T0,$T2 # splat input
1876 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1877 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1878 vpmuludq $H3,$T3,$T4 # h3*s3
1879 vpmuludq $H4,$T3,$H2 # h4*s3
1881 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1882 vpaddq $H2,$D2,$D2 # d2 += h4*s3
1883 vpunpckhqdq $T1,$T0,$T4 # 4
1885 vpmuludq $H3,$S4,$H3 # h3*s4
1886 vpmuludq $H4,$S4,$H4 # h4*s4
1887 vpunpcklqdq $T1,$T0,$T0 # 0:1
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1890 vpunpcklqdq $T3,$T2,$T3 # 2:3
1891 vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
1892 vpmuludq $H1,$S4,$H0 # h1*s4
1893 vmovdqa 64(%rcx),$MASK # .Lmask26
1894 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1895 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1897 ################################################################
1898 # lazy reduction (interleaved with tail of input splat)
1902 vpaddq $D3,$H4,$H4 # h3 -> h4
1906 vpaddq $D0,$D1,$H1 # h0 -> h1
1915 vpaddq $D1,$H2,$H2 # h1 -> h2
1919 vpaddq $D4,$H0,$H0 # h4 -> h0
1921 vpand $MASK,$T2,$T2 # 2
1926 vpaddq $D2,$H3,$H3 # h2 -> h3
1928 vpaddq $T2,$H2,$H2 # modulo-scheduled
1933 vpaddq $D0,$H1,$H1 # h0 -> h1
1935 vpsrlq \$40,$T4,$T4 # 4
1939 vpaddq $D3,$H4,$H4 # h3 -> h4
1941 vpand $MASK,$T0,$T0 # 0
1942 vpand $MASK,$T1,$T1 # 1
1943 vpand $MASK,$T3,$T3 # 3
1944 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1951 ################################################################
# while the above multiplications were by r^4 in all lanes, in the
# last iteration we multiply the least significant lane by r^4 and
# the most significant one by r, so this is a copy of the above
# except that references to the precomputed table are displaced by 4...
1957 #vpaddq $H2,$T2,$H2 # accumulate input
1959 vmovdqu `32*0+4`(%rsp),$T0 # r0^4
1961 vmovdqu `32*1+4`(%rsp),$T1 # r1^4
1963 vmovdqu `32*3+4`(%rsp),$T2 # r2^4
1965 vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
1966 vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
1968 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1969 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1970 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1971 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1972 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1974 vpmuludq $H0,$T1,$T4 # h0*r1
1975 vpmuludq $H1,$T1,$H2 # h1*r1
1976 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1977 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1978 vpmuludq $H3,$T1,$T4 # h3*r1
1979 vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
1980 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1981 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1983 vpmuludq $H0,$T0,$T4 # h0*r0
1984 vpmuludq $H1,$T0,$H2 # h1*r0
1985 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1986 vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
1987 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1988 vpmuludq $H3,$T0,$T4 # h3*r0
1989 vpmuludq $H4,$T0,$H2 # h4*r0
1990 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1991 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1993 vpmuludq $H3,$T1,$T4 # h3*s2
1994 vpmuludq $H4,$T1,$H2 # h4*s2
1995 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1996 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1997 vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
1998 vpmuludq $H1,$T2,$T4 # h1*r2
1999 vpmuludq $H0,$T2,$T2 # h0*r2
2000 vpaddq $T4,$D3,$D3 # d3 += h1*r2
2001 vpaddq $T2,$D2,$D2 # d2 += h0*r2
2003 vpmuludq $H1,$H2,$T4 # h1*r3
2004 vpmuludq $H0,$H2,$H2 # h0*r3
2005 vpaddq $T4,$D4,$D4 # d4 += h1*r3
2006 vpaddq $H2,$D3,$D3 # d3 += h0*r3
2007 vpmuludq $H3,$T3,$T4 # h3*s3
2008 vpmuludq $H4,$T3,$H2 # h4*s3
2009 vpaddq $T4,$D1,$D1 # d1 += h3*s3
2010 vpaddq $H2,$D2,$D2 # d2 += h4*s3
2012 vpmuludq $H3,$S4,$H3 # h3*s4
2013 vpmuludq $H4,$S4,$H4 # h4*s4
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
2016 vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
2017 vpmuludq $H1,$S4,$H0 # h1*s4
2018 vmovdqa 64(%rcx),$MASK # .Lmask26
2019 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
2020 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
2022 ################################################################
2023 # horizontal addition
2036 vpermq \$0x2,$H3,$T3
2037 vpermq \$0x2,$H4,$T4
2038 vpermq \$0x2,$H0,$T0
2039 vpermq \$0x2,$D1,$T1
2040 vpermq \$0x2,$H2,$T2
2047 ################################################################
2052 vpaddq $D3,$H4,$H4 # h3 -> h4
2056 vpaddq $D0,$D1,$H1 # h0 -> h1
2063 vpaddq $D1,$H2,$H2 # h1 -> h2
2067 vpaddq $D4,$H0,$H0 # h4 -> h0
2071 vpaddq $D2,$H3,$H3 # h2 -> h3
2075 vpaddq $D0,$H1,$H1 # h0 -> h1
2079 vpaddq $D3,$H4,$H4 # h3 -> h4
2081 vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2082 vmovd %x#$H1,`4*1-48-64`($ctx)
2083 vmovd %x#$H2,`4*2-48-64`($ctx)
2084 vmovd %x#$H3,`4*3-48-64`($ctx)
2085 vmovd %x#$H4,`4*4-48-64`($ctx)
2087 $code.=<<___ if ($win64);
2088 vmovdqa 0x50(%r11),%xmm6
2089 vmovdqa 0x60(%r11),%xmm7
2090 vmovdqa 0x70(%r11),%xmm8
2091 vmovdqa 0x80(%r11),%xmm9
2092 vmovdqa 0x90(%r11),%xmm10
2093 vmovdqa 0xa0(%r11),%xmm11
2094 vmovdqa 0xb0(%r11),%xmm12
2095 vmovdqa 0xc0(%r11),%xmm13
2096 vmovdqa 0xd0(%r11),%xmm14
2097 vmovdqa 0xe0(%r11),%xmm15
2101 $code.=<<___ if (!$win64);
2109 .size poly1305_blocks_avx2,.-poly1305_blocks_avx2
2111 #######################################################################
# On entry we have the input length divisible by 64. But since the
# inner loop processes 128 bytes per iteration, cases when the length
# is not divisible by 128 are handled by passing the tail 64 bytes to
# .Ltail_avx2. For this reason the stack layout is kept identical to
# poly1305_blocks_avx2. If not for this tail, we wouldn't even have
# to allocate a stack frame...
2119 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
2120 my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
2121 my $PADBIT="%zmm30";
2123 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain
2124 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2125 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2126 map(s/%y/%z/,($MASK));
2129 .type poly1305_blocks_avx512,\@function,4
2131 poly1305_blocks_avx512:
2137 $code.=<<___ if (!$win64);
2139 .cfi_def_cfa %r11,16
2142 $code.=<<___ if ($win64);
2143 lea -0xf8(%rsp),%r11
2145 vmovdqa %xmm6,0x50(%r11)
2146 vmovdqa %xmm7,0x60(%r11)
2147 vmovdqa %xmm8,0x70(%r11)
2148 vmovdqa %xmm9,0x80(%r11)
2149 vmovdqa %xmm10,0x90(%r11)
2150 vmovdqa %xmm11,0xa0(%r11)
2151 vmovdqa %xmm12,0xb0(%r11)
2152 vmovdqa %xmm13,0xc0(%r11)
2153 vmovdqa %xmm14,0xd0(%r11)
2154 vmovdqa %xmm15,0xe0(%r11)
2158 lea .Lconst(%rip),%rcx
2159 lea 48+64($ctx),$ctx # size optimization
2160 vmovdqa 96(%rcx),%y#$T2 # .Lpermd_avx2
2162 # expand pre-calculated table
2163 vmovdqu32 `16*0-64`($ctx),${R0}{%k2}{z}
2165 vmovdqu32 `16*1-64`($ctx),${R1}{%k2}{z}
2167 vmovdqu32 `16*2-64`($ctx),${S1}{%k2}{z}
2168 vmovdqu32 `16*3-64`($ctx),${R2}{%k2}{z}
2169 vmovdqu32 `16*4-64`($ctx),${S2}{%k2}{z}
2170 vmovdqu32 `16*5-64`($ctx),${R3}{%k2}{z}
2171 vmovdqu32 `16*6-64`($ctx),${S3}{%k2}{z}
2172 vmovdqu32 `16*7-64`($ctx),${R4}{%k2}{z}
2173 vmovdqu32 `16*8-64`($ctx),${S4}{%k2}{z}
2174 vpermd $R0,$T2,$R0 # 00003412 -> 14243444
2175 vpbroadcastq 64(%rcx),$MASK # .Lmask26
2179 vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0
2180 vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
2182 vmovdqu64 $R1,0x00(%rsp,%rax){%k2}
2185 vmovdqa64 $S1,0x40(%rsp){%k2}
2188 vmovdqu64 $R2,0x40(%rsp,%rax){%k2}
2190 vmovdqa64 $S2,0x80(%rsp){%k2}
2191 vmovdqu64 $R3,0x80(%rsp,%rax){%k2}
2192 vmovdqa64 $S3,0xc0(%rsp){%k2}
2193 vmovdqu64 $R4,0xc0(%rsp,%rax){%k2}
2194 vmovdqa64 $S4,0x100(%rsp){%k2}
2196 ################################################################
2197 # calculate 5th through 8th powers of the key
2199 # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2200 # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2201 # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
2202 # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
2203 # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0
2205 vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
2206 vpmuludq $T0,$R1,$D1 # d1 = r0'*r1
2207 vpmuludq $T0,$R2,$D2 # d2 = r0'*r2
2208 vpmuludq $T0,$R3,$D3 # d3 = r0'*r3
2209 vpmuludq $T0,$R4,$D4 # d4 = r0'*r4
2212 vpmuludq $T1,$S4,$M0
2213 vpmuludq $T1,$R0,$M1
2214 vpmuludq $T1,$R1,$M2
2215 vpmuludq $T1,$R2,$M3
2216 vpmuludq $T1,$R3,$M4
2218 vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4
2219 vpaddq $M1,$D1,$D1 # d1 += r1'*r0
2220 vpaddq $M2,$D2,$D2 # d2 += r1'*r1
2221 vpaddq $M3,$D3,$D3 # d3 += r1'*r2
2222 vpaddq $M4,$D4,$D4 # d4 += r1'*r3
2224 vpmuludq $T2,$S3,$M0
2225 vpmuludq $T2,$S4,$M1
2226 vpmuludq $T2,$R1,$M3
2227 vpmuludq $T2,$R2,$M4
2228 vpmuludq $T2,$R0,$M2
2230 vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3
2231 vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4
2232 vpaddq $M3,$D3,$D3 # d3 += r2'*r1
2233 vpaddq $M4,$D4,$D4 # d4 += r2'*r2
2234 vpaddq $M2,$D2,$D2 # d2 += r2'*r0
2236 vpmuludq $T3,$S2,$M0
2237 vpmuludq $T3,$R0,$M3
2238 vpmuludq $T3,$R1,$M4
2239 vpmuludq $T3,$S3,$M1
2240 vpmuludq $T3,$S4,$M2
2241 vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2
2242 vpaddq $M3,$D3,$D3 # d3 += r3'*r0
2243 vpaddq $M4,$D4,$D4 # d4 += r3'*r1
2244 vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3
2245 vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4
2247 vpmuludq $T4,$S4,$M3
2248 vpmuludq $T4,$R0,$M4
2249 vpmuludq $T4,$S1,$M0
2250 vpmuludq $T4,$S2,$M1
2251 vpmuludq $T4,$S3,$M2
vpaddq $M3,$D3,$D3 # d3 += r4'*5*r4
vpaddq $M4,$D4,$D4 # d4 += r4'*r0
vpaddq $M0,$D0,$D0 # d0 += r4'*5*r1
vpaddq $M1,$D1,$D1 # d1 += r4'*5*r2
vpaddq $M2,$D2,$D2 # d2 += r4'*5*r3
2258 ################################################################
2260 vmovdqu64 16*0($inp),%z#$T3
2261 vmovdqu64 16*4($inp),%z#$T4
2264 ################################################################
2268 vpandq $MASK,$D3,$D3
2269 vpaddq $M3,$D4,$D4 # d3 -> d4
2272 vpandq $MASK,$D0,$D0
2273 vpaddq $M0,$D1,$D1 # d0 -> d1
2276 vpandq $MASK,$D4,$D4
2279 vpandq $MASK,$D1,$D1
2280 vpaddq $M1,$D2,$D2 # d1 -> d2
2284 vpaddq $M4,$D0,$D0 # d4 -> d0
2287 vpandq $MASK,$D2,$D2
2288 vpaddq $M2,$D3,$D3 # d2 -> d3
2291 vpandq $MASK,$D0,$D0
2292 vpaddq $M0,$D1,$D1 # d0 -> d1
2295 vpandq $MASK,$D3,$D3
2296 vpaddq $M3,$D4,$D4 # d3 -> d4
2298 ################################################################
# at this point we have 14243444 in $R0-$S4 and 05060708 in
# $D0-$D4, ...
2302 vpunpcklqdq $T4,$T3,$T0 # transpose input
2303 vpunpckhqdq $T4,$T3,$T4
2305 # ... since input 64-bit lanes are ordered as 73625140, we could
2306 # "vperm" it to 76543210 (here and in each loop iteration), *or*
2307 # we could just flow along, hence the goal for $R0-$S4 is
2308 # 1858286838784888 ...
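# (each digit in this notation names the power of the key held in
# the corresponding lane, with 0 standing for an empty lane, cf.
# "05060708" above)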
2310 vmovdqa32 128(%rcx),$M0 # .Lpermd_avx512:
2314 vpermd $R0,$M0,$R0 # 14243444 -> 1---2---3---4---
2320 vpermd $D0,$M0,${R0}{%k1} # 05060708 -> 1858286838784888
2321 vpermd $D1,$M0,${R1}{%k1}
2322 vpermd $D2,$M0,${R2}{%k1}
2323 vpermd $D3,$M0,${R3}{%k1}
2324 vpermd $D4,$M0,${R4}{%k1}
2326 vpslld \$2,$R1,$S1 # *5
2335 vpbroadcastq 32(%rcx),$PADBIT # .L129
2337 vpsrlq \$52,$T0,$T2 # splat input
2342 vpsrlq \$40,$T4,$T4 # 4
2343 vpandq $MASK,$T2,$T2 # 2
2344 vpandq $MASK,$T0,$T0 # 0
2345 #vpandq $MASK,$T1,$T1 # 1
2346 #vpandq $MASK,$T3,$T3 # 3
2347 #vporq $PADBIT,$T4,$T4 # padbit, yes, always
2349 vpaddq $H2,$T2,$H2 # accumulate input
2356 ################################################################
2357 # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2358 # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2359 # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2360 # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2361 # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2362 # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2363 # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2364 # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2365 # \________/\___________/
2366 ################################################################
2367 #vpaddq $H2,$T2,$H2 # accumulate input
2369 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
2370 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
2371 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
2372 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
2373 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
# however, as h2 is "chronologically" the first one available, the
# corresponding operations are pulled up, so it's
2378 # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
2379 # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
2380 # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
2381 # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
2382 # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
2384 vpmuludq $H2,$R1,$D3 # d3 = h2*r1
2386 vpmuludq $H2,$R2,$D4 # d4 = h2*r2
2387 vpandq $MASK,$T1,$T1 # 1
2388 vpmuludq $H2,$S3,$D0 # d0 = h2*s3
2389 vpandq $MASK,$T3,$T3 # 3
2390 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
2391 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2392 vpmuludq $H2,$R0,$D2 # d2 = h2*r0
2393 vpaddq $H1,$T1,$H1 # accumulate input
2397 vmovdqu64 16*0($inp),$T3 # load input
2398 vmovdqu64 16*4($inp),$T4
2400 vpmuludq $H0,$R3,$M3
2401 vpmuludq $H0,$R4,$M4
2402 vpmuludq $H0,$R0,$M0
2403 vpmuludq $H0,$R1,$M1
2404 vpaddq $M3,$D3,$D3 # d3 += h0*r3
2405 vpaddq $M4,$D4,$D4 # d4 += h0*r4
2406 vpaddq $M0,$D0,$D0 # d0 += h0*r0
2407 vpaddq $M1,$D1,$D1 # d1 += h0*r1
2409 vpmuludq $H1,$R2,$M3
2410 vpmuludq $H1,$R3,$M4
2411 vpmuludq $H1,$S4,$M0
2412 vpmuludq $H0,$R2,$M2
2413 vpaddq $M3,$D3,$D3 # d3 += h1*r2
2414 vpaddq $M4,$D4,$D4 # d4 += h1*r3
2415 vpaddq $M0,$D0,$D0 # d0 += h1*s4
2416 vpaddq $M2,$D2,$D2 # d2 += h0*r2
2418 vpunpcklqdq $T4,$T3,$T0 # transpose input
2419 vpunpckhqdq $T4,$T3,$T4
2421 vpmuludq $H3,$R0,$M3
2422 vpmuludq $H3,$R1,$M4
2423 vpmuludq $H1,$R0,$M1
2424 vpmuludq $H1,$R1,$M2
2425 vpaddq $M3,$D3,$D3 # d3 += h3*r0
2426 vpaddq $M4,$D4,$D4 # d4 += h3*r1
2427 vpaddq $M1,$D1,$D1 # d1 += h1*r0
2428 vpaddq $M2,$D2,$D2 # d2 += h1*r1
2430 vpmuludq $H4,$S4,$M3
2431 vpmuludq $H4,$R0,$M4
2432 vpmuludq $H3,$S2,$M0
2433 vpmuludq $H3,$S3,$M1
2434 vpaddq $M3,$D3,$D3 # d3 += h4*s4
2435 vpmuludq $H3,$S4,$M2
2436 vpaddq $M4,$D4,$D4 # d4 += h4*r0
2437 vpaddq $M0,$D0,$D0 # d0 += h3*s2
2438 vpaddq $M1,$D1,$D1 # d1 += h3*s3
2439 vpaddq $M2,$D2,$D2 # d2 += h3*s4
2441 vpmuludq $H4,$S1,$M0
2442 vpmuludq $H4,$S2,$M1
2443 vpmuludq $H4,$S3,$M2
2444 vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
2448 ################################################################
2449 # lazy reduction (interleaved with input splat)
2451 vpsrlq \$52,$T0,$T2 # splat input
2455 vpandq $MASK,$D3,$D3
2456 vpaddq $H3,$D4,$H4 # h3 -> h4
2461 vpandq $MASK,$H0,$H0
2462 vpaddq $D0,$H1,$H1 # h0 -> h1
2464 vpandq $MASK,$T2,$T2 # 2
2467 vpandq $MASK,$H4,$H4
2470 vpandq $MASK,$H1,$H1
2471 vpaddq $D1,$H2,$H2 # h1 -> h2
2475 vpaddq $D4,$H0,$H0 # h4 -> h0
2477 vpaddq $T2,$H2,$H2 # modulo-scheduled
2481 vpandq $MASK,$H2,$H2
2482 vpaddq $D2,$D3,$H3 # h2 -> h3
2487 vpandq $MASK,$H0,$H0
2488 vpaddq $D0,$H1,$H1 # h0 -> h1
2490 vpsrlq \$40,$T4,$T4 # 4
2493 vpandq $MASK,$H3,$H3
2494 vpaddq $D3,$H4,$H4 # h3 -> h4
2496 vpandq $MASK,$T0,$T0 # 0
2497 #vpandq $MASK,$T1,$T1 # 1
2498 #vpandq $MASK,$T3,$T3 # 3
2499 #vporq $PADBIT,$T4,$T4 # padbit, yes, always
2505 ################################################################
# while the above multiplications were by r^8 in all lanes, in the
# last iteration we multiply the least significant lane by r^8 and
# the most significant one by r, which is why the table gets shifted...
2510 vpsrlq \$32,$R0,$R0 # 0105020603070408
2520 ################################################################
# load either the next or the last 64 bytes of input
2522 lea ($inp,$len),$inp
2524 #vpaddq $H2,$T2,$H2 # accumulate input
2527 vpmuludq $H2,$R1,$D3 # d3 = h2*r1
2528 vpmuludq $H2,$R2,$D4 # d4 = h2*r2
2529 vpmuludq $H2,$S3,$D0 # d0 = h2*s3
2530 vpandq $MASK,$T1,$T1 # 1
2531 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
2532 vpandq $MASK,$T3,$T3 # 3
2533 vpmuludq $H2,$R0,$D2 # d2 = h2*r0
2534 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2535 vpaddq $H1,$T1,$H1 # accumulate input
2539 vmovdqu 16*0($inp),%x#$T0
2540 vpmuludq $H0,$R3,$M3
2541 vpmuludq $H0,$R4,$M4
2542 vpmuludq $H0,$R0,$M0
2543 vpmuludq $H0,$R1,$M1
2544 vpaddq $M3,$D3,$D3 # d3 += h0*r3
2545 vpaddq $M4,$D4,$D4 # d4 += h0*r4
2546 vpaddq $M0,$D0,$D0 # d0 += h0*r0
2547 vpaddq $M1,$D1,$D1 # d1 += h0*r1
2549 vmovdqu 16*1($inp),%x#$T1
2550 vpmuludq $H1,$R2,$M3
2551 vpmuludq $H1,$R3,$M4
2552 vpmuludq $H1,$S4,$M0
2553 vpmuludq $H0,$R2,$M2
2554 vpaddq $M3,$D3,$D3 # d3 += h1*r2
2555 vpaddq $M4,$D4,$D4 # d4 += h1*r3
2556 vpaddq $M0,$D0,$D0 # d0 += h1*s4
2557 vpaddq $M2,$D2,$D2 # d2 += h0*r2
2559 vinserti128 \$1,16*2($inp),%y#$T0,%y#$T0
2560 vpmuludq $H3,$R0,$M3
2561 vpmuludq $H3,$R1,$M4
2562 vpmuludq $H1,$R0,$M1
2563 vpmuludq $H1,$R1,$M2
2564 vpaddq $M3,$D3,$D3 # d3 += h3*r0
2565 vpaddq $M4,$D4,$D4 # d4 += h3*r1
2566 vpaddq $M1,$D1,$D1 # d1 += h1*r0
2567 vpaddq $M2,$D2,$D2 # d2 += h1*r1
2569 vinserti128 \$1,16*3($inp),%y#$T1,%y#$T1
2570 vpmuludq $H4,$S4,$M3
2571 vpmuludq $H4,$R0,$M4
2572 vpmuludq $H3,$S2,$M0
2573 vpmuludq $H3,$S3,$M1
2574 vpmuludq $H3,$S4,$M2
2575 vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4
2576 vpaddq $M4,$D4,$D4 # d4 += h4*r0
2577 vpaddq $M0,$D0,$D0 # d0 += h3*s2
2578 vpaddq $M1,$D1,$D1 # d1 += h3*s3
2579 vpaddq $M2,$D2,$D2 # d2 += h3*s4
2581 vpmuludq $H4,$S1,$M0
2582 vpmuludq $H4,$S2,$M1
2583 vpmuludq $H4,$S3,$M2
2584 vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
2588 ################################################################
2589 # horizontal addition
2592 vpermq \$0xb1,$H3,$D3
2593 vpermq \$0xb1,$D4,$H4
2594 vpermq \$0xb1,$H0,$D0
2595 vpermq \$0xb1,$H1,$D1
2596 vpermq \$0xb1,$H2,$D2
2604 vpermq \$0x2,$H3,$D3
2605 vpermq \$0x2,$H4,$D4
2606 vpermq \$0x2,$H0,$D0
2607 vpermq \$0x2,$H1,$D1
2608 vpermq \$0x2,$H2,$D2
2615 vextracti64x4 \$0x1,$H3,%y#$D3
2616 vextracti64x4 \$0x1,$H4,%y#$D4
2617 vextracti64x4 \$0x1,$H0,%y#$D0
2618 vextracti64x4 \$0x1,$H1,%y#$D1
2619 vextracti64x4 \$0x1,$H2,%y#$D2
2620 vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case
2621 vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2
2622 vpaddq $D0,$H0,${H0}{%k3}{z}
2623 vpaddq $D1,$H1,${H1}{%k3}{z}
2624 vpaddq $D2,$H2,${H2}{%k3}{z}
2626 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2627 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2629 ################################################################
2630 # lazy reduction (interleaved with input splat)
2634 vpsrldq \$6,$T0,$T2 # splat input
2636 vpunpckhqdq $T1,$T0,$T4 # 4
2637 vpaddq $D3,$H4,$H4 # h3 -> h4
2641 vpunpcklqdq $T3,$T2,$T2 # 2:3
2642 vpunpcklqdq $T1,$T0,$T0 # 0:1
2643 vpaddq $D0,$H1,$H1 # h0 -> h1
2652 vpaddq $D1,$H2,$H2 # h1 -> h2
2657 vpsrlq \$40,$T4,$T4 # 4
2658 vpaddq $D4,$H0,$H0 # h4 -> h0
2662 vpand $MASK,$T2,$T2 # 2
2663 vpand $MASK,$T0,$T0 # 0
2664 vpaddq $D2,$H3,$H3 # h2 -> h3
2668 vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2
2669 vpand $MASK,$T1,$T1 # 1
2670 vpaddq $D0,$H1,$H1 # h0 -> h1
2674 vpand $MASK,$T3,$T3 # 3
2675 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
2676 vpaddq $D3,$H4,$H4 # h3 -> h4
2678 lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
2682 vpsubq $T2,$H2,$H2 # undo input accumulation
2683 vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2684 vmovd %x#$H1,`4*1-48-64`($ctx)
2685 vmovd %x#$H2,`4*2-48-64`($ctx)
2686 vmovd %x#$H3,`4*3-48-64`($ctx)
2687 vmovd %x#$H4,`4*4-48-64`($ctx)
2690 $code.=<<___ if ($win64);
2691 movdqa 0x50(%r11),%xmm6
2692 movdqa 0x60(%r11),%xmm7
2693 movdqa 0x70(%r11),%xmm8
2694 movdqa 0x80(%r11),%xmm9
2695 movdqa 0x90(%r11),%xmm10
2696 movdqa 0xa0(%r11),%xmm11
2697 movdqa 0xb0(%r11),%xmm12
2698 movdqa 0xc0(%r11),%xmm13
2699 movdqa 0xd0(%r11),%xmm14
2700 movdqa 0xe0(%r11),%xmm15
2702 .Ldo_avx512_epilogue:
2704 $code.=<<___ if (!$win64);
2711 .size poly1305_blocks_avx512,.-poly1305_blocks_avx512
2714 ########################################################################
2715 # VPMADD52 version using 2^44 radix.
# One can argue that base 2^52 would be more natural. Well, even
# though some operations would be more natural, one has to recognize
# a couple of things. First, base 2^52 doesn't provide an advantage
# over base 2^44 if you look at the amount of multiply-and-accumulate
# operations. Secondly, it makes it impossible to pre-compute
# multiples of 5 [referred to as s[]/sN in reference implementations],
# which means that more such operations would have to be performed in
# the inner loop, which in turn makes the critical path longer. In
# other words, even though base 2^44 reduction might look less
# elegant, the overall critical path is actually shorter...
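#
# A worked detail: with 44+44+42-bit limbs, a product term that
# overflows bit 130 does so 2 bits above a limb boundary (132 = 3*44),
# and 2^132 = 4*2^130 = 20 (mod 2^130-5). That is why s[] below holds
# the key multiplied by 20 rather than by 5 (the lea-based *5 followed
# by the "magic <<2" in poly1305_init_base2_44).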
2727 ########################################################################
# The layout of the opaque area is as follows.
2730 # unsigned __int64 h[3]; # current hash value base 2^44
2731 # unsigned __int64 s[2]; # key value*20 base 2^44
2732 # unsigned __int64 r[3]; # key value base 2^44
2733 # struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
2734 # # r^n positions reflect
2735 # # placement in register, not
2736 # # memory, R[3] is R[1]*20
2739 .type poly1305_init_base2_44,\@function,3
2741 poly1305_init_base2_44:
2743 mov %rax,0($ctx) # initialize hash value
2748 lea poly1305_blocks_vpmadd52(%rip),%r10
2749 lea poly1305_emit_base2_44(%rip),%r11
2751 mov \$0x0ffffffc0fffffff,%rax
2752 mov \$0x0ffffffc0ffffffc,%rcx
2754 mov \$0x00000fffffffffff,%r8
2756 mov \$0x00000fffffffffff,%r9
2759 mov %r8,40($ctx) # r0
2762 mov %rax,48($ctx) # r1
2763 lea (%rax,%rax,4),%rax # *5
2764 mov %rcx,56($ctx) # r2
2765 shl \$2,%rax # magic <<2
2766 lea (%rcx,%rcx,4),%rcx # *5
2767 shl \$2,%rcx # magic <<2
2768 mov %rax,24($ctx) # s1
2769 mov %rcx,32($ctx) # s2
2770 movq \$-1,64($ctx) # write impossible value
2772 $code.=<<___ if ($flavour !~ /elf32/);
2776 $code.=<<___ if ($flavour =~ /elf32/);
2783 .size poly1305_init_base2_44,.-poly1305_init_base2_44
2786 my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
2787 my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
2788 my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));
2791 .type poly1305_blocks_vpmadd52,\@function,4
2793 poly1305_blocks_vpmadd52:
2795 jz .Lno_data_vpmadd52 # too short
mov 64($ctx),%r8 # peek at the power of the key
# if powers of the key are not calculated yet, process up to 3
# blocks with this single-block subroutine, otherwise ensure that
# length is divisible by 2 blocks and pass the rest down to the next
# subroutine...
cmp \$4,$len # is input long enough?
2809 test %r8,%r8 # is power value impossible?
2812 and $len,%rax # is input of favourable length?
2813 jz .Lblocks_vpmadd52_4x
2819 lea .L2_44_inp_permd(%rip),%r10
2822 vmovq $padbit,%x#$PAD
2823 vmovdqa64 0(%r10),$inp_permd # .L2_44_inp_permd
2824 vmovdqa64 32(%r10),$inp_shift # .L2_44_inp_shift
2825 vpermq \$0xcf,$PAD,$PAD
2826 vmovdqa64 64(%r10),$reduc_mask # .L2_44_mask
2828 vmovdqu64 0($ctx),${Dlo}{%k7}{z} # load hash value
2829 vmovdqu64 40($ctx),${r2r1r0}{%k7}{z} # load keys
2830 vmovdqu64 32($ctx),${r1r0s2}{%k7}{z}
2831 vmovdqu64 24($ctx),${r0s2s1}{%k7}{z}
2833 vmovdqa64 96(%r10),$reduc_rght # .L2_44_shift_rgt
2834 vmovdqa64 128(%r10),$reduc_left # .L2_44_shift_lft
2840 vmovdqu32 0($inp),%x#$T0 # load input as ----3210
2843 vpermd $T0,$inp_permd,$T0 # ----3210 -> --322110
2844 vpsrlvq $inp_shift,$T0,$T0
2845 vpandq $reduc_mask,$T0,$T0
2848 vpaddq $T0,$Dlo,$Dlo # accumulate input
2850 vpermq \$0,$Dlo,${H0}{%k7}{z} # smash hash value
2851 vpermq \$0b01010101,$Dlo,${H1}{%k7}{z}
2852 vpermq \$0b10101010,$Dlo,${H2}{%k7}{z}
2854 vpxord $Dlo,$Dlo,$Dlo
2855 vpxord $Dhi,$Dhi,$Dhi
2857 vpmadd52luq $r2r1r0,$H0,$Dlo
2858 vpmadd52huq $r2r1r0,$H0,$Dhi
2860 vpmadd52luq $r1r0s2,$H1,$Dlo
2861 vpmadd52huq $r1r0s2,$H1,$Dhi
2863 vpmadd52luq $r0s2s1,$H2,$Dlo
2864 vpmadd52huq $r0s2s1,$H2,$Dhi
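# at this point each lane of $Dlo/$Dhi holds the low and high 52 bits
# of the accumulated 52x52-bit products; the steps below fold them
# back into base 2^44 limbs: low halves keep 44 bits (42 for the top
# limb), high halves are first aligned left by 52-44=8 (52-42=10) bits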
2866 vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword
2867 vpsllvq $reduc_left,$Dhi,$Dhi # 0 in topmost qword
2868 vpandq $reduc_mask,$Dlo,$Dlo
2870 vpaddq $T0,$Dhi,$Dhi
2872 vpermq \$0b10010011,$Dhi,$Dhi # 0 in lowest qword
2874 vpaddq $Dhi,$Dlo,$Dlo # note topmost qword :-)
2876 vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost word
2877 vpandq $reduc_mask,$Dlo,$Dlo
2879 vpermq \$0b10010011,$T0,$T0
2881 vpaddq $T0,$Dlo,$Dlo
2883 vpermq \$0b10010011,$Dlo,${T0}{%k1}{z}
2885 vpaddq $T0,$Dlo,$Dlo
2888 vpaddq $T0,$Dlo,$Dlo
2893 vmovdqu64 $Dlo,0($ctx){%k7} # store hash value
2896 jnz .Lblocks_vpmadd52_4x
2900 .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
2904 ########################################################################
# As implied by its name, the 4x subroutine processes 4 blocks in
# parallel (and also handles lengths of 4*n+2 blocks). It uses key
# powers up to the 4th and operates on 256-bit %ymm registers.
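#
# In formula form this is the standard 4-way interleaving (an aside,
# not generator code): with blocks m1..m4 and key r,
#
#	h = ((h+m1)*r^4 + m2*r^3 + m3*r^2 + m4*r) mod (2^130-5)
#
# so the four products land in independent 64-bit lanes and are only
# summed horizontally once, after the last block.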
2909 my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
2910 my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
2911 my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
2914 .type poly1305_blocks_vpmadd52_4x,\@function,4
2916 poly1305_blocks_vpmadd52_4x:
2918 jz .Lno_data_vpmadd52_4x # too short
mov 64($ctx),%r8 # peek at the power of the key
2923 .Lblocks_vpmadd52_4x:
2924 vpbroadcastq $padbit,$PAD
2926 vmovdqa64 .Lx_mask44(%rip),$mask44
2928 vmovdqa64 .Lx_mask42(%rip),$mask42
2929 kmovw %eax,%k1 # used in 2x path
2931 test %r8,%r8 # is power value impossible?
2932 js .Linit_vpmadd52 # if it is, then init R[4]
2934 vmovq 0($ctx),%x#$H0 # load current hash value
2935 vmovq 8($ctx),%x#$H1
2936 vmovq 16($ctx),%x#$H2
2938 test \$3,$len # is length 4*n+2?
2939 jnz .Lblocks_vpmadd52_2x_do
2941 .Lblocks_vpmadd52_4x_do:
2942 vpbroadcastq 64($ctx),$R0 # load 4th power of the key
2943 vpbroadcastq 96($ctx),$R1
2944 vpbroadcastq 128($ctx),$R2
2945 vpbroadcastq 160($ctx),$S1
2947 .Lblocks_vpmadd52_4x_key_loaded:
2948 vpsllq \$2,$R2,$S2 # S2 = R2*5*4
2952 test \$7,$len # is len 8*n?
2953 jz .Lblocks_vpmadd52_8x
2955 vmovdqu64 16*0($inp),$T2 # load data
2956 vmovdqu64 16*2($inp),$T3
2959 vpunpcklqdq $T3,$T2,$T1 # transpose data
2960 vpunpckhqdq $T3,$T2,$T3
2962 # at this point 64-bit lanes are ordered as 3-1-2-0
2964 vpsrlq \$24,$T3,$T2 # splat the data
2966 vpaddq $T2,$H2,$H2 # accumulate input
2967 vpandq $mask44,$T1,$T0
2971 vpandq $mask44,$T1,$T1
2974 jz .Ltail_vpmadd52_4x
2975 jmp .Loop_vpmadd52_4x
2980 vmovq 24($ctx),%x#$S1 # load key
2981 vmovq 56($ctx),%x#$H2
2982 vmovq 32($ctx),%x#$S2
2983 vmovq 40($ctx),%x#$R0
2984 vmovq 48($ctx),%x#$R1
2992 .Lmul_init_vpmadd52:
2993 vpxorq $D0lo,$D0lo,$D0lo
2994 vpmadd52luq $H2,$S1,$D0lo
2995 vpxorq $D0hi,$D0hi,$D0hi
2996 vpmadd52huq $H2,$S1,$D0hi
2997 vpxorq $D1lo,$D1lo,$D1lo
2998 vpmadd52luq $H2,$S2,$D1lo
2999 vpxorq $D1hi,$D1hi,$D1hi
3000 vpmadd52huq $H2,$S2,$D1hi
3001 vpxorq $D2lo,$D2lo,$D2lo
3002 vpmadd52luq $H2,$R0,$D2lo
3003 vpxorq $D2hi,$D2hi,$D2hi
3004 vpmadd52huq $H2,$R0,$D2hi
3006 vpmadd52luq $H0,$R0,$D0lo
3007 vpmadd52huq $H0,$R0,$D0hi
3008 vpmadd52luq $H0,$R1,$D1lo
3009 vpmadd52huq $H0,$R1,$D1hi
3010 vpmadd52luq $H0,$R2,$D2lo
3011 vpmadd52huq $H0,$R2,$D2hi
3013 vpmadd52luq $H1,$S2,$D0lo
3014 vpmadd52huq $H1,$S2,$D0hi
3015 vpmadd52luq $H1,$R0,$D1lo
3016 vpmadd52huq $H1,$R0,$D1hi
3017 vpmadd52luq $H1,$R1,$D2lo
3018 vpmadd52huq $H1,$R1,$D2hi
################################################################
# partial reduction
3022 vpsrlq \$44,$D0lo,$tmp
3023 vpsllq \$8,$D0hi,$D0hi
3024 vpandq $mask44,$D0lo,$H0
3025 vpaddq $tmp,$D0hi,$D0hi
3027 vpaddq $D0hi,$D1lo,$D1lo
3029 vpsrlq \$44,$D1lo,$tmp
3030 vpsllq \$8,$D1hi,$D1hi
3031 vpandq $mask44,$D1lo,$H1
3032 vpaddq $tmp,$D1hi,$D1hi
3034 vpaddq $D1hi,$D2lo,$D2lo
3036 vpsrlq \$42,$D2lo,$tmp
3037 vpsllq \$10,$D2hi,$D2hi
3038 vpandq $mask42,$D2lo,$H2
3039 vpaddq $tmp,$D2hi,$D2hi
3041 vpaddq $D2hi,$H0,$H0
3042 vpsllq \$2,$D2hi,$D2hi
3044 vpaddq $D2hi,$H0,$H0
3046 vpsrlq \$44,$H0,$tmp # additional step
3047 vpandq $mask44,$H0,$H0
3052 jz .Ldone_init_vpmadd52
3054 vpunpcklqdq $R1,$H1,$R1 # 1,2
3055 vpbroadcastq %x#$H1,%x#$H1 # 2,2
3056 vpunpcklqdq $R2,$H2,$R2
3057 vpbroadcastq %x#$H2,%x#$H2
3058 vpunpcklqdq $R0,$H0,$R0
3059 vpbroadcastq %x#$H0,%x#$H0
3061 vpsllq \$2,$R1,$S1 # S1 = R1*5*4
3062 vpsllq \$2,$R2,$S2 # S2 = R2*5*4
3068 jmp .Lmul_init_vpmadd52
3072 .Ldone_init_vpmadd52:
3073 vinserti128 \$1,%x#$R1,$H1,$R1 # 1,2,3,4
3074 vinserti128 \$1,%x#$R2,$H2,$R2
3075 vinserti128 \$1,%x#$R0,$H0,$R0
3077 vpermq \$0b11011000,$R1,$R1 # 1,3,2,4
3078 vpermq \$0b11011000,$R2,$R2
3079 vpermq \$0b11011000,$R0,$R0
3081 vpsllq \$2,$R1,$S1 # S1 = R1*5*4
3085 vmovq 0($ctx),%x#$H0 # load current hash value
3086 vmovq 8($ctx),%x#$H1
3087 vmovq 16($ctx),%x#$H2
3089 test \$3,$len # is length 4*n+2?
3090 jnz .Ldone_init_vpmadd52_2x
3092 vmovdqu64 $R0,64($ctx) # save key powers
3093 vpbroadcastq %x#$R0,$R0 # broadcast 4th power
3094 vmovdqu64 $R1,96($ctx)
3095 vpbroadcastq %x#$R1,$R1
3096 vmovdqu64 $R2,128($ctx)
3097 vpbroadcastq %x#$R2,$R2
3098 vmovdqu64 $S1,160($ctx)
3099 vpbroadcastq %x#$S1,$S1
3101 jmp .Lblocks_vpmadd52_4x_key_loaded
3105 .Ldone_init_vpmadd52_2x:
3106 vmovdqu64 $R0,64($ctx) # save key powers
3107 vpsrldq \$8,$R0,$R0 # 0-1-0-2
3108 vmovdqu64 $R1,96($ctx)
3110 vmovdqu64 $R2,128($ctx)
3112 vmovdqu64 $S1,160($ctx)
3114 jmp .Lblocks_vpmadd52_2x_key_loaded
3118 .Lblocks_vpmadd52_2x_do:
3119 vmovdqu64 128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers
3120 vmovdqu64 160+8($ctx),${S1}{%k1}{z}
3121 vmovdqu64 64+8($ctx),${R0}{%k1}{z}
3122 vmovdqu64 96+8($ctx),${R1}{%k1}{z}
3124 .Lblocks_vpmadd52_2x_key_loaded:
3125 vmovdqu64 16*0($inp),$T2 # load data
3129 vpunpcklqdq $T3,$T2,$T1 # transpose data
3130 vpunpckhqdq $T3,$T2,$T3
3132 # at this point 64-bit lanes are ordered as x-1-x-0
3134 vpsrlq \$24,$T3,$T2 # splat the data
3136 vpaddq $T2,$H2,$H2 # accumulate input
3137 vpandq $mask44,$T1,$T0
3141 vpandq $mask44,$T1,$T1
3143 jmp .Ltail_vpmadd52_2x
3148 #vpaddq $T2,$H2,$H2 # accumulate input
3152 vpxorq $D0lo,$D0lo,$D0lo
3153 vpmadd52luq $H2,$S1,$D0lo
3154 vpxorq $D0hi,$D0hi,$D0hi
3155 vpmadd52huq $H2,$S1,$D0hi
3156 vpxorq $D1lo,$D1lo,$D1lo
3157 vpmadd52luq $H2,$S2,$D1lo
3158 vpxorq $D1hi,$D1hi,$D1hi
3159 vpmadd52huq $H2,$S2,$D1hi
3160 vpxorq $D2lo,$D2lo,$D2lo
3161 vpmadd52luq $H2,$R0,$D2lo
3162 vpxorq $D2hi,$D2hi,$D2hi
3163 vpmadd52huq $H2,$R0,$D2hi
3165 vmovdqu64 16*0($inp),$T2 # load data
3166 vmovdqu64 16*2($inp),$T3
3168 vpmadd52luq $H0,$R0,$D0lo
3169 vpmadd52huq $H0,$R0,$D0hi
3170 vpmadd52luq $H0,$R1,$D1lo
3171 vpmadd52huq $H0,$R1,$D1hi
3172 vpmadd52luq $H0,$R2,$D2lo
3173 vpmadd52huq $H0,$R2,$D2hi
3175 vpunpcklqdq $T3,$T2,$T1 # transpose data
3176 vpunpckhqdq $T3,$T2,$T3
3177 vpmadd52luq $H1,$S2,$D0lo
3178 vpmadd52huq $H1,$S2,$D0hi
3179 vpmadd52luq $H1,$R0,$D1lo
3180 vpmadd52huq $H1,$R0,$D1hi
3181 vpmadd52luq $H1,$R1,$D2lo
3182 vpmadd52huq $H1,$R1,$D2hi
3184 ################################################################
3185 # partial reduction (interleaved with data splat)
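# (in scalar terms: vpmadd52huq yields product bits 52..103, so
# shifting hi left by 8 (10 for the 42-bit limb) aligns it with
# lo>>44 (lo>>42); each limb is masked back to 44 (42) bits, and
# the carry out of the top limb is folded into limb 0 as
# carry*5 = carry + (carry<<2), because 2^130 = 5 mod (2^130-5))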
3186 vpsrlq \$44,$D0lo,$tmp
3187 vpsllq \$8,$D0hi,$D0hi
3188 vpandq $mask44,$D0lo,$H0
3189 vpaddq $tmp,$D0hi,$D0hi
3193 vpaddq $D0hi,$D1lo,$D1lo
3195 vpsrlq \$44,$D1lo,$tmp
3196 vpsllq \$8,$D1hi,$D1hi
3197 vpandq $mask44,$D1lo,$H1
3198 vpaddq $tmp,$D1hi,$D1hi
3200 vpandq $mask44,$T1,$T0
3203 vpaddq $D1hi,$D2lo,$D2lo
3205 vpsrlq \$42,$D2lo,$tmp
3206 vpsllq \$10,$D2hi,$D2hi
3207 vpandq $mask42,$D2lo,$H2
3208 vpaddq $tmp,$D2hi,$D2hi
3210 vpaddq $T2,$H2,$H2 # accumulate input
3211 vpaddq $D2hi,$H0,$H0
3212 vpsllq \$2,$D2hi,$D2hi
3214 vpaddq $D2hi,$H0,$H0
3216 vpandq $mask44,$T1,$T1
3218 vpsrlq \$44,$H0,$tmp # additional step
3219 vpandq $mask44,$H0,$H0
sub \$4,$len # len-=4 blocks (64 bytes)
3224 jnz .Loop_vpmadd52_4x
3227 vmovdqu64 128($ctx),$R2 # load all key powers
3228 vmovdqu64 160($ctx),$S1
3229 vmovdqu64 64($ctx),$R0
3230 vmovdqu64 96($ctx),$R1
3233 vpsllq \$2,$R2,$S2 # S2 = R2*5*4
3237 #vpaddq $T2,$H2,$H2 # accumulate input
3241 vpxorq $D0lo,$D0lo,$D0lo
3242 vpmadd52luq $H2,$S1,$D0lo
3243 vpxorq $D0hi,$D0hi,$D0hi
3244 vpmadd52huq $H2,$S1,$D0hi
3245 vpxorq $D1lo,$D1lo,$D1lo
3246 vpmadd52luq $H2,$S2,$D1lo
3247 vpxorq $D1hi,$D1hi,$D1hi
3248 vpmadd52huq $H2,$S2,$D1hi
3249 vpxorq $D2lo,$D2lo,$D2lo
3250 vpmadd52luq $H2,$R0,$D2lo
3251 vpxorq $D2hi,$D2hi,$D2hi
3252 vpmadd52huq $H2,$R0,$D2hi
3254 vpmadd52luq $H0,$R0,$D0lo
3255 vpmadd52huq $H0,$R0,$D0hi
3256 vpmadd52luq $H0,$R1,$D1lo
3257 vpmadd52huq $H0,$R1,$D1hi
3258 vpmadd52luq $H0,$R2,$D2lo
3259 vpmadd52huq $H0,$R2,$D2hi
3261 vpmadd52luq $H1,$S2,$D0lo
3262 vpmadd52huq $H1,$S2,$D0hi
3263 vpmadd52luq $H1,$R0,$D1lo
3264 vpmadd52huq $H1,$R0,$D1hi
3265 vpmadd52luq $H1,$R1,$D2lo
3266 vpmadd52huq $H1,$R1,$D2hi
3268 ################################################################
3269 # horizontal addition
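# (lane fold, in outline: vpsrldq by 8 followed by vpaddq folds the
# odd qword of each 128-bit half into the even one, vpermq with
# immediate 2 pulls the upper half's sum down, and the masked vpaddq
# zeroes the lanes that are no longer needed, leaving the full sum
# in the low lane)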
3273 vpsrldq \$8,$D0lo,$T0
3274 vpsrldq \$8,$D0hi,$H0
3275 vpsrldq \$8,$D1lo,$T1
3276 vpsrldq \$8,$D1hi,$H1
3277 vpaddq $T0,$D0lo,$D0lo
3278 vpaddq $H0,$D0hi,$D0hi
3279 vpsrldq \$8,$D2lo,$T2
3280 vpsrldq \$8,$D2hi,$H2
3281 vpaddq $T1,$D1lo,$D1lo
3282 vpaddq $H1,$D1hi,$D1hi
3283 vpermq \$0x2,$D0lo,$T0
3284 vpermq \$0x2,$D0hi,$H0
3285 vpaddq $T2,$D2lo,$D2lo
3286 vpaddq $H2,$D2hi,$D2hi
3288 vpermq \$0x2,$D1lo,$T1
3289 vpermq \$0x2,$D1hi,$H1
3290 vpaddq $T0,$D0lo,${D0lo}{%k1}{z}
3291 vpaddq $H0,$D0hi,${D0hi}{%k1}{z}
3292 vpermq \$0x2,$D2lo,$T2
3293 vpermq \$0x2,$D2hi,$H2
3294 vpaddq $T1,$D1lo,${D1lo}{%k1}{z}
3295 vpaddq $H1,$D1hi,${D1hi}{%k1}{z}
3296 vpaddq $T2,$D2lo,${D2lo}{%k1}{z}
3297 vpaddq $H2,$D2hi,${D2hi}{%k1}{z}
################################################################
# partial reduction
3301 vpsrlq \$44,$D0lo,$tmp
3302 vpsllq \$8,$D0hi,$D0hi
3303 vpandq $mask44,$D0lo,$H0
3304 vpaddq $tmp,$D0hi,$D0hi
3306 vpaddq $D0hi,$D1lo,$D1lo
3308 vpsrlq \$44,$D1lo,$tmp
3309 vpsllq \$8,$D1hi,$D1hi
3310 vpandq $mask44,$D1lo,$H1
3311 vpaddq $tmp,$D1hi,$D1hi
3313 vpaddq $D1hi,$D2lo,$D2lo
3315 vpsrlq \$42,$D2lo,$tmp
3316 vpsllq \$10,$D2hi,$D2hi
3317 vpandq $mask42,$D2lo,$H2
3318 vpaddq $tmp,$D2hi,$D2hi
3320 vpaddq $D2hi,$H0,$H0
3321 vpsllq \$2,$D2hi,$D2hi
3323 vpaddq $D2hi,$H0,$H0
3325 vpsrlq \$44,$H0,$tmp # additional step
3326 vpandq $mask44,$H0,$H0
3329 # at this point $len is
3330 # either 4*n+2 or 0...
sub \$2,$len # len-=2 blocks (32 bytes)
3332 ja .Lblocks_vpmadd52_4x_do
3334 vmovq %x#$H0,0($ctx)
3335 vmovq %x#$H1,8($ctx)
3336 vmovq %x#$H2,16($ctx)
3339 .Lno_data_vpmadd52_4x:
3341 .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
3345 ########################################################################
# As implied by its name, the 8x subroutine processes 8 blocks in
# parallel... This is an intermediate version: it's used only when the
# input length is 8*n, 8*n+1 or 8*n+2...
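#
# Key-power bootstrap in formula form (an aside): the 4x path left
# r^1..r^4 stored in 1-3-2-4 lane order, so multiplying that whole
# vector by a broadcast r^4,
#
#	(r^1,r^3,r^2,r^4) * r^4 = (r^5,r^7,r^6,r^8)
#
# yields the 5-7-6-8 powers that the code below interleaves with the
# original ones into 1-5-2-6-3-7-4-8.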
3350 my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
3351 my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
3352 my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
3353 my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10));
3356 .type poly1305_blocks_vpmadd52_8x,\@function,4
3358 poly1305_blocks_vpmadd52_8x:
3360 jz .Lno_data_vpmadd52_8x # too short
mov 64($ctx),%r8 # peek at the power of the key
3365 vmovdqa64 .Lx_mask44(%rip),$mask44
3366 vmovdqa64 .Lx_mask42(%rip),$mask42
3368 test %r8,%r8 # is power value impossible?
3369 js .Linit_vpmadd52 # if it is, then init R[4]
3371 vmovq 0($ctx),%x#$H0 # load current hash value
3372 vmovq 8($ctx),%x#$H1
3373 vmovq 16($ctx),%x#$H2
3375 .Lblocks_vpmadd52_8x:
3376 ################################################################
# first we calculate more key powers
3379 vmovdqu64 128($ctx),$R2 # load 1-3-2-4 powers
3380 vmovdqu64 160($ctx),$S1
3381 vmovdqu64 64($ctx),$R0
3382 vmovdqu64 96($ctx),$R1
3384 vpsllq \$2,$R2,$S2 # S2 = R2*5*4
3388 vpbroadcastq %x#$R2,$RR2 # broadcast 4th power
3389 vpbroadcastq %x#$R0,$RR0
3390 vpbroadcastq %x#$R1,$RR1
3392 vpxorq $D0lo,$D0lo,$D0lo
3393 vpmadd52luq $RR2,$S1,$D0lo
3394 vpxorq $D0hi,$D0hi,$D0hi
3395 vpmadd52huq $RR2,$S1,$D0hi
3396 vpxorq $D1lo,$D1lo,$D1lo
3397 vpmadd52luq $RR2,$S2,$D1lo
3398 vpxorq $D1hi,$D1hi,$D1hi
3399 vpmadd52huq $RR2,$S2,$D1hi
3400 vpxorq $D2lo,$D2lo,$D2lo
3401 vpmadd52luq $RR2,$R0,$D2lo
3402 vpxorq $D2hi,$D2hi,$D2hi
3403 vpmadd52huq $RR2,$R0,$D2hi
3405 vpmadd52luq $RR0,$R0,$D0lo
3406 vpmadd52huq $RR0,$R0,$D0hi
3407 vpmadd52luq $RR0,$R1,$D1lo
3408 vpmadd52huq $RR0,$R1,$D1hi
3409 vpmadd52luq $RR0,$R2,$D2lo
3410 vpmadd52huq $RR0,$R2,$D2hi
3412 vpmadd52luq $RR1,$S2,$D0lo
3413 vpmadd52huq $RR1,$S2,$D0hi
3414 vpmadd52luq $RR1,$R0,$D1lo
3415 vpmadd52huq $RR1,$R0,$D1hi
3416 vpmadd52luq $RR1,$R1,$D2lo
3417 vpmadd52huq $RR1,$R1,$D2hi
################################################################
# partial reduction
3421 vpsrlq \$44,$D0lo,$tmp
3422 vpsllq \$8,$D0hi,$D0hi
3423 vpandq $mask44,$D0lo,$RR0
3424 vpaddq $tmp,$D0hi,$D0hi
3426 vpaddq $D0hi,$D1lo,$D1lo
3428 vpsrlq \$44,$D1lo,$tmp
3429 vpsllq \$8,$D1hi,$D1hi
3430 vpandq $mask44,$D1lo,$RR1
3431 vpaddq $tmp,$D1hi,$D1hi
3433 vpaddq $D1hi,$D2lo,$D2lo
3435 vpsrlq \$42,$D2lo,$tmp
3436 vpsllq \$10,$D2hi,$D2hi
3437 vpandq $mask42,$D2lo,$RR2
3438 vpaddq $tmp,$D2hi,$D2hi
3440 vpaddq $D2hi,$RR0,$RR0
3441 vpsllq \$2,$D2hi,$D2hi
3443 vpaddq $D2hi,$RR0,$RR0
3445 vpsrlq \$44,$RR0,$tmp # additional step
3446 vpandq $mask44,$RR0,$RR0
3448 vpaddq $tmp,$RR1,$RR1
3450 ################################################################
# At this point Rx holds the 1-3-2-4 powers and RRx the 5-7-6-8 ones;
# the goal is the 1-5-2-6-3-7-4-8 order, which reflects how the data
# is loaded...
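# (concretely: vpunpcklqdq pairs the low qwords into 3-7-4-8,
# vpunpckhqdq the high ones into 1-5-2-6, and vshufi64x2 with
# immediate 0x44 then concatenates the two 256-bit halves into
# the 512-bit 1-5-2-6-3-7-4-8 vector)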
3454 vpunpcklqdq $R2,$RR2,$T2 # 3748
3455 vpunpckhqdq $R2,$RR2,$R2 # 1526
3456 vpunpcklqdq $R0,$RR0,$T0
3457 vpunpckhqdq $R0,$RR0,$R0
3458 vpunpcklqdq $R1,$RR1,$T1
3459 vpunpckhqdq $R1,$RR1,$R1
3461 ######## switch to %zmm
3462 map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
3463 map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
3464 map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
3465 map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2);
3468 vshufi64x2 \$0x44,$R2,$T2,$RR2 # 15263748
3469 vshufi64x2 \$0x44,$R0,$T0,$RR0
3470 vshufi64x2 \$0x44,$R1,$T1,$RR1
3472 vmovdqu64 16*0($inp),$T2 # load data
3473 vmovdqu64 16*4($inp),$T3
3476 vpsllq \$2,$RR2,$SS2 # S2 = R2*5*4
3477 vpsllq \$2,$RR1,$SS1 # S1 = R1*5*4
3478 vpaddq $RR2,$SS2,$SS2
3479 vpaddq $RR1,$SS1,$SS1
3480 vpsllq \$2,$SS2,$SS2
3481 vpsllq \$2,$SS1,$SS1
3483 vpbroadcastq $padbit,$PAD
3484 vpbroadcastq %x#$mask44,$mask44
3485 vpbroadcastq %x#$mask42,$mask42
3487 vpbroadcastq %x#$SS1,$S1 # broadcast 8th power
3488 vpbroadcastq %x#$SS2,$S2
3489 vpbroadcastq %x#$RR0,$R0
3490 vpbroadcastq %x#$RR1,$R1
3491 vpbroadcastq %x#$RR2,$R2
3493 vpunpcklqdq $T3,$T2,$T1 # transpose data
3494 vpunpckhqdq $T3,$T2,$T3
3496 # at this point 64-bit lanes are ordered as 73625140
3498 vpsrlq \$24,$T3,$T2 # splat the data
3500 vpaddq $T2,$H2,$H2 # accumulate input
3501 vpandq $mask44,$T1,$T0
3505 vpandq $mask44,$T1,$T1
3508 jz .Ltail_vpmadd52_8x
3509 jmp .Loop_vpmadd52_8x
3513 #vpaddq $T2,$H2,$H2 # accumulate input
3517 vpxorq $D0lo,$D0lo,$D0lo
3518 vpmadd52luq $H2,$S1,$D0lo
3519 vpxorq $D0hi,$D0hi,$D0hi
3520 vpmadd52huq $H2,$S1,$D0hi
3521 vpxorq $D1lo,$D1lo,$D1lo
3522 vpmadd52luq $H2,$S2,$D1lo
3523 vpxorq $D1hi,$D1hi,$D1hi
3524 vpmadd52huq $H2,$S2,$D1hi
3525 vpxorq $D2lo,$D2lo,$D2lo
3526 vpmadd52luq $H2,$R0,$D2lo
3527 vpxorq $D2hi,$D2hi,$D2hi
3528 vpmadd52huq $H2,$R0,$D2hi
3530 vmovdqu64 16*0($inp),$T2 # load data
3531 vmovdqu64 16*4($inp),$T3
3533 vpmadd52luq $H0,$R0,$D0lo
3534 vpmadd52huq $H0,$R0,$D0hi
3535 vpmadd52luq $H0,$R1,$D1lo
3536 vpmadd52huq $H0,$R1,$D1hi
3537 vpmadd52luq $H0,$R2,$D2lo
3538 vpmadd52huq $H0,$R2,$D2hi
3540 vpunpcklqdq $T3,$T2,$T1 # transpose data
3541 vpunpckhqdq $T3,$T2,$T3
3542 vpmadd52luq $H1,$S2,$D0lo
3543 vpmadd52huq $H1,$S2,$D0hi
3544 vpmadd52luq $H1,$R0,$D1lo
3545 vpmadd52huq $H1,$R0,$D1hi
3546 vpmadd52luq $H1,$R1,$D2lo
3547 vpmadd52huq $H1,$R1,$D2hi
3549 ################################################################
3550 # partial reduction (interleaved with data splat)
3551 vpsrlq \$44,$D0lo,$tmp
3552 vpsllq \$8,$D0hi,$D0hi
3553 vpandq $mask44,$D0lo,$H0
3554 vpaddq $tmp,$D0hi,$D0hi
3558 vpaddq $D0hi,$D1lo,$D1lo
3560 vpsrlq \$44,$D1lo,$tmp
3561 vpsllq \$8,$D1hi,$D1hi
3562 vpandq $mask44,$D1lo,$H1
3563 vpaddq $tmp,$D1hi,$D1hi
3565 vpandq $mask44,$T1,$T0
3568 vpaddq $D1hi,$D2lo,$D2lo
3570 vpsrlq \$42,$D2lo,$tmp
3571 vpsllq \$10,$D2hi,$D2hi
3572 vpandq $mask42,$D2lo,$H2
3573 vpaddq $tmp,$D2hi,$D2hi
3575 vpaddq $T2,$H2,$H2 # accumulate input
3576 vpaddq $D2hi,$H0,$H0
3577 vpsllq \$2,$D2hi,$D2hi
3579 vpaddq $D2hi,$H0,$H0
3581 vpandq $mask44,$T1,$T1
3583 vpsrlq \$44,$H0,$tmp # additional step
3584 vpandq $mask44,$H0,$H0
sub \$8,$len # len-=8 blocks (128 bytes)
3589 jnz .Loop_vpmadd52_8x
3592 #vpaddq $T2,$H2,$H2 # accumulate input
3596 vpxorq $D0lo,$D0lo,$D0lo
3597 vpmadd52luq $H2,$SS1,$D0lo
3598 vpxorq $D0hi,$D0hi,$D0hi
3599 vpmadd52huq $H2,$SS1,$D0hi
3600 vpxorq $D1lo,$D1lo,$D1lo
3601 vpmadd52luq $H2,$SS2,$D1lo
3602 vpxorq $D1hi,$D1hi,$D1hi
3603 vpmadd52huq $H2,$SS2,$D1hi
3604 vpxorq $D2lo,$D2lo,$D2lo
3605 vpmadd52luq $H2,$RR0,$D2lo
3606 vpxorq $D2hi,$D2hi,$D2hi
3607 vpmadd52huq $H2,$RR0,$D2hi
3609 vpmadd52luq $H0,$RR0,$D0lo
3610 vpmadd52huq $H0,$RR0,$D0hi
3611 vpmadd52luq $H0,$RR1,$D1lo
3612 vpmadd52huq $H0,$RR1,$D1hi
3613 vpmadd52luq $H0,$RR2,$D2lo
3614 vpmadd52huq $H0,$RR2,$D2hi
3616 vpmadd52luq $H1,$SS2,$D0lo
3617 vpmadd52huq $H1,$SS2,$D0hi
3618 vpmadd52luq $H1,$RR0,$D1lo
3619 vpmadd52huq $H1,$RR0,$D1hi
3620 vpmadd52luq $H1,$RR1,$D2lo
3621 vpmadd52huq $H1,$RR1,$D2hi
3623 ################################################################
3624 # horizontal addition
3628 vpsrldq \$8,$D0lo,$T0
3629 vpsrldq \$8,$D0hi,$H0
3630 vpsrldq \$8,$D1lo,$T1
3631 vpsrldq \$8,$D1hi,$H1
3632 vpaddq $T0,$D0lo,$D0lo
3633 vpaddq $H0,$D0hi,$D0hi
3634 vpsrldq \$8,$D2lo,$T2
3635 vpsrldq \$8,$D2hi,$H2
3636 vpaddq $T1,$D1lo,$D1lo
3637 vpaddq $H1,$D1hi,$D1hi
3638 vpermq \$0x2,$D0lo,$T0
3639 vpermq \$0x2,$D0hi,$H0
3640 vpaddq $T2,$D2lo,$D2lo
3641 vpaddq $H2,$D2hi,$D2hi
3643 vpermq \$0x2,$D1lo,$T1
3644 vpermq \$0x2,$D1hi,$H1
3645 vpaddq $T0,$D0lo,$D0lo
3646 vpaddq $H0,$D0hi,$D0hi
3647 vpermq \$0x2,$D2lo,$T2
3648 vpermq \$0x2,$D2hi,$H2
3649 vpaddq $T1,$D1lo,$D1lo
3650 vpaddq $H1,$D1hi,$D1hi
3651 vextracti64x4 \$1,$D0lo,%y#$T0
3652 vextracti64x4 \$1,$D0hi,%y#$H0
3653 vpaddq $T2,$D2lo,$D2lo
3654 vpaddq $H2,$D2hi,$D2hi
3656 vextracti64x4 \$1,$D1lo,%y#$T1
3657 vextracti64x4 \$1,$D1hi,%y#$H1
3658 vextracti64x4 \$1,$D2lo,%y#$T2
3659 vextracti64x4 \$1,$D2hi,%y#$H2
3661 ######## switch back to %ymm
3662 map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
3663 map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
3664 map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
3667 vpaddq $T0,$D0lo,${D0lo}{%k1}{z}
3668 vpaddq $H0,$D0hi,${D0hi}{%k1}{z}
3669 vpaddq $T1,$D1lo,${D1lo}{%k1}{z}
3670 vpaddq $H1,$D1hi,${D1hi}{%k1}{z}
3671 vpaddq $T2,$D2lo,${D2lo}{%k1}{z}
3672 vpaddq $H2,$D2hi,${D2hi}{%k1}{z}
################################################################
# partial reduction
3676 vpsrlq \$44,$D0lo,$tmp
3677 vpsllq \$8,$D0hi,$D0hi
3678 vpandq $mask44,$D0lo,$H0
3679 vpaddq $tmp,$D0hi,$D0hi
3681 vpaddq $D0hi,$D1lo,$D1lo
3683 vpsrlq \$44,$D1lo,$tmp
3684 vpsllq \$8,$D1hi,$D1hi
3685 vpandq $mask44,$D1lo,$H1
3686 vpaddq $tmp,$D1hi,$D1hi
3688 vpaddq $D1hi,$D2lo,$D2lo
3690 vpsrlq \$42,$D2lo,$tmp
3691 vpsllq \$10,$D2hi,$D2hi
3692 vpandq $mask42,$D2lo,$H2
3693 vpaddq $tmp,$D2hi,$D2hi
3695 vpaddq $D2hi,$H0,$H0
3696 vpsllq \$2,$D2hi,$D2hi
3698 vpaddq $D2hi,$H0,$H0
3700 vpsrlq \$44,$H0,$tmp # additional step
3701 vpandq $mask44,$H0,$H0
3705 ################################################################
3707 vmovq %x#$H0,0($ctx)
3708 vmovq %x#$H1,8($ctx)
3709 vmovq %x#$H2,16($ctx)
3712 .Lno_data_vpmadd52_8x:
3714 .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
3718 .type poly1305_emit_base2_44,\@function,3
3720 poly1305_emit_base2_44:
3721 mov 0($ctx),%r8 # load hash value
3737 add \$5,%r8 # compare to modulus
3741 shr \$2,%r10 # did 130-bit value overflow?
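# (final reduction trick: compute h+5 in full; if it carries into
# bit 130, then h >= 2^130-5 and h mod (2^130-5) equals the low
# bits of h+5, so a branch-free cmov picks h+5 over h; only the
# low 128 bits survive into the tag once the nonce is added)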
3745 add 0($nonce),%rax # accumulate nonce
3747 mov %rax,0($mac) # write result
3751 .size poly1305_emit_base2_44,.-poly1305_emit_base2_44
3758 .long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
3760 .long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
3762 .long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
3764 .long 2,2,2,3,2,0,2,1
3766 .long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7
3769 .long 0,1,1,2,2,3,7,7
3773 .quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
3781 .quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
3782 .quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
3784 .quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
3785 .quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
3790 .asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3794 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3795 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
3803 .extern __imp_RtlVirtualUnwind
3804 .type se_handler,\@abi-omnipotent
3818 mov 120($context),%rax # pull context->Rax
3819 mov 248($context),%rbx # pull context->Rip
3821 mov 8($disp),%rsi # disp->ImageBase
3822 mov 56($disp),%r11 # disp->HandlerData
3824 mov 0(%r11),%r10d # HandlerData[0]
3825 lea (%rsi,%r10),%r10 # prologue label
3826 cmp %r10,%rbx # context->Rip<.Lprologue
3827 jb .Lcommon_seh_tail
3829 mov 152($context),%rax # pull context->Rsp
3831 mov 4(%r11),%r10d # HandlerData[1]
3832 lea (%rsi,%r10),%r10 # epilogue label
3833 cmp %r10,%rbx # context->Rip>=.Lepilogue
3834 jae .Lcommon_seh_tail
3844 mov %rbx,144($context) # restore context->Rbx
3845 mov %rbp,160($context) # restore context->Rbp
3846 mov %r12,216($context) # restore context->R12
3847 mov %r13,224($context) # restore context->R13
3848 mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
3851 jmp .Lcommon_seh_tail
3852 .size se_handler,.-se_handler
3854 .type avx_handler,\@abi-omnipotent
3868 mov 120($context),%rax # pull context->Rax
3869 mov 248($context),%rbx # pull context->Rip
3871 mov 8($disp),%rsi # disp->ImageBase
3872 mov 56($disp),%r11 # disp->HandlerData
3874 mov 0(%r11),%r10d # HandlerData[0]
3875 lea (%rsi,%r10),%r10 # prologue label
3876 cmp %r10,%rbx # context->Rip<prologue label
3877 jb .Lcommon_seh_tail
3879 mov 152($context),%rax # pull context->Rsp
3881 mov 4(%r11),%r10d # HandlerData[1]
3882 lea (%rsi,%r10),%r10 # epilogue label
3883 cmp %r10,%rbx # context->Rip>=epilogue label
3884 jae .Lcommon_seh_tail
3886 mov 208($context),%rax # pull context->R11
3890 lea 512($context),%rdi # &context.Xmm6
3892 .long 0xa548f3fc # cld; rep movsq
3897 mov %rax,152($context) # restore context->Rsp
3898 mov %rsi,168($context) # restore context->Rsi
3899 mov %rdi,176($context) # restore context->Rdi
3901 mov 40($disp),%rdi # disp->ContextRecord
3902 mov $context,%rsi # context
mov \$154,%ecx # sizeof(CONTEXT) in qwords
3904 .long 0xa548f3fc # cld; rep movsq
3907 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3908 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3909 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3910 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3911 mov 40(%rsi),%r10 # disp->ContextRecord
3912 lea 56(%rsi),%r11 # &disp->HandlerData
3913 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3914 mov %r10,32(%rsp) # arg5
3915 mov %r11,40(%rsp) # arg6
3916 mov %r12,48(%rsp) # arg7
3917 mov %rcx,56(%rsp) # arg8, (NULL)
3918 call *__imp_RtlVirtualUnwind(%rip)
3920 mov \$1,%eax # ExceptionContinueSearch
3932 .size avx_handler,.-avx_handler
3936 .rva .LSEH_begin_poly1305_init
3937 .rva .LSEH_end_poly1305_init
3938 .rva .LSEH_info_poly1305_init
3940 .rva .LSEH_begin_poly1305_blocks
3941 .rva .LSEH_end_poly1305_blocks
3942 .rva .LSEH_info_poly1305_blocks
3944 .rva .LSEH_begin_poly1305_emit
3945 .rva .LSEH_end_poly1305_emit
3946 .rva .LSEH_info_poly1305_emit
3948 $code.=<<___ if ($avx);
3949 .rva .LSEH_begin_poly1305_blocks_avx
3951 .rva .LSEH_info_poly1305_blocks_avx_1
3955 .rva .LSEH_info_poly1305_blocks_avx_2
3958 .rva .LSEH_end_poly1305_blocks_avx
3959 .rva .LSEH_info_poly1305_blocks_avx_3
3961 .rva .LSEH_begin_poly1305_emit_avx
3962 .rva .LSEH_end_poly1305_emit_avx
3963 .rva .LSEH_info_poly1305_emit_avx
3965 $code.=<<___ if ($avx>1);
3966 .rva .LSEH_begin_poly1305_blocks_avx2
3967 .rva .Lbase2_64_avx2
3968 .rva .LSEH_info_poly1305_blocks_avx2_1
3970 .rva .Lbase2_64_avx2
3972 .rva .LSEH_info_poly1305_blocks_avx2_2
3975 .rva .LSEH_end_poly1305_blocks_avx2
3976 .rva .LSEH_info_poly1305_blocks_avx2_3
3978 $code.=<<___ if ($avx>2);
3979 .rva .LSEH_begin_poly1305_blocks_avx512
3980 .rva .LSEH_end_poly1305_blocks_avx512
3981 .rva .LSEH_info_poly1305_blocks_avx512
3986 .LSEH_info_poly1305_init:
3989 .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
3991 .LSEH_info_poly1305_blocks:
3994 .rva .Lblocks_body,.Lblocks_epilogue
3996 .LSEH_info_poly1305_emit:
3999 .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
4001 $code.=<<___ if ($avx);
4002 .LSEH_info_poly1305_blocks_avx_1:
4005 .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]
4007 .LSEH_info_poly1305_blocks_avx_2:
4010 .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]
4012 .LSEH_info_poly1305_blocks_avx_3:
4015 .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]
4017 .LSEH_info_poly1305_emit_avx:
4020 .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
4022 $code.=<<___ if ($avx>1);
4023 .LSEH_info_poly1305_blocks_avx2_1:
4026 .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]
4028 .LSEH_info_poly1305_blocks_avx2_2:
4031 .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]
4033 .LSEH_info_poly1305_blocks_avx2_3:
4036 .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
4038 $code.=<<___ if ($avx>2);
4039 .LSEH_info_poly1305_blocks_avx512:
4042 .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[]
4046 foreach (split('\n',$code)) {
s/\`([^\`]*)\`/eval($1)/ge;	# interpolate constants computed in backticks
s/%r([a-z]+)#d/%e$1/g;	# "%rax#d" -> "%eax", 32-bit form of legacy regs
s/%r([0-9]+)#d/%r$1d/g;	# "%r8#d" -> "%r8d", 32-bit form of extended regs
s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;	# "%x#%ymmN" -> "%xmmN" etc., narrow SIMD registers