3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # This module implements Poly1305 hash for x86_64.
14 # Numbers are cycles per processed byte with poly1305_blocks alone,
15 # measured with rdtsc at fixed clock frequency.
17 # IALU/gcc-4.8(*) AVX(**) AVX2
20 # Westmere 1.86/+120% -
21 # Sandy Bridge 1.39/+140% 1.10
22 # Haswell 1.10/+175% 1.11 0.65
23 # Skylake 1.12/+120% 0.96 0.51
24 # Silvermont 2.83/+95% -
25 # VIA Nano 1.82/+150% -
26 # Sledgehammer 1.38/+160% -
27 # Bulldozer 2.21/+130% 0.97
29 # (*) improvement coefficients relative to clang are more modest and
30 # are ~50% on most processors; in both cases we are comparing to
31 # __int128 code;
32 # (**) SSE2 implementation was attempted, but among non-AVX processors
33 # it was faster than integer-only code only on older Intel P4 and
34 # Core processors, by 30-50% (the newer the processor, the smaller the
35 # gain), yet slower on contemporary ones, e.g. almost 2x slower on
36 # Atom; as the former are naturally disappearing, SSE2 is deemed unnecessary;
40 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
42 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
44 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
45 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
46 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
47 die "can't locate x86_64-xlate.pl";
49 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
50 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
51 $avx = ($1>=2.19) + ($1>=2.22);
54 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
55 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
56 $avx = ($1>=2.09) + ($1>=2.10);
59 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
60 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
61 $avx = ($1>=10) + ($1>=12);
64 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
65 $avx = ($2>=3.0) + ($2>3.0);
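# $avx: 0 = scalar code only, 1 = AVX code paths are emitted,
# 2 = AVX2 code paths are emitted as well.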
68 open OUT,"| \"$^X\" $xlate $flavour $output";
71 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
72 my ($mac,$nonce)=($inp,$len); # *_emit arguments
73 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
74 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
76 sub poly1305_iteration {
77 # input: copy of $r1 in %rax, $h0-$h2, $r0-$r1
78 # output: $h0-$h2 *= $r0-$r1
86 mov %rax,$h0 # future $h0
96 mov $h2,$h1 # borrow $h1
100 imulq $s1,$h1 # h2*s1
105 imulq $r0,$h2 # h2*r0
107 mov \$-4,%rax # mask value
110 and $d3,%rax # last reduction step
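#
# A sketch of the last reduction step (comments only, illustration):
# since 2^130 = 5 (mod 2^130-5), the bits of d3 at 2^130 and above
# fold back multiplied by 5, and 5*t = 4*t + t with 4*t = d3 & -4:
#
#	c   = d3 & -4;		// 4*(d3 >> 2), this is the mask value
#	h2  = d3 & 3;		// bits 128:129 stay in the hash
#	h0 += c + (c >> 2);	// += 5*(d3 >> 2), carries propagating
#				// into h1 and h2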
120 ########################################################################
121 # Layout of the opaque area is as follows.
123 # unsigned __int64 h[3]; # current hash value base 2^64
124 # unsigned __int64 r[2]; # key value base 2^64
129 .extern OPENSSL_ia32cap_P
132 .type poly1305_init,\@function,2
136 mov %rax,0($ctx) # initialize hash value
143 lea poly1305_blocks(%rip),%r10
144 lea poly1305_emit(%rip),%r11
146 $code.=<<___ if ($avx);
147 mov OPENSSL_ia32cap_P+4(%rip),%r9
148 lea poly1305_blocks_avx(%rip),%rax
149 lea poly1305_emit_avx(%rip),%rcx
150 bt \$`60-32`,%r9 # AVX?
154 $code.=<<___ if ($avx>1);
155 lea poly1305_blocks_avx2(%rip),%rax
156 bt \$`5+32`,%r9 # AVX2?
160 mov \$0x0ffffffc0fffffff,%rax
161 mov \$0x0ffffffc0ffffffc,%rcx
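# This is the standard Poly1305 clamp of the key's "r" half; in C-like
# terms (a sketch):
#
#	r0 = le64(key + 0) & 0x0ffffffc0fffffff;
#	r1 = le64(key + 8) & 0x0ffffffc0ffffffc;
#
# i.e. the top 4 bits of every 32-bit word of r are cleared, as are
# the low 2 bits of its upper three words.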
173 .size poly1305_init,.-poly1305_init
175 .globl poly1305_blocks
176 .type poly1305_blocks,\@function,4
179 sub \$16,$len # too short?
190 mov $len,%r15 # reassign $len
192 mov 24($ctx),$r0 # load r
195 mov 0($ctx),$h0 # load hash value
202 add $r1,$s1 # s1 = r1 + (r1 >> 2)
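# s1 = r1 + (r1 >> 2) = 5*r1/4 is exact, because the clamp clears the
# two low bits of r1. It appears because 2^128 = 5/4 (mod 2^130-5),
# so the cross term h1*r1 at position 2^128 in h*r reduces as follows
# (a sketch):
#
#	h1*r1*2^128 = h1*(5*r1/4) = h1*s1 (mod 2^130-5)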
207 add 0($inp),$h0 # accumulate input
212 &poly1305_iteration();
215 sub \$16,%r15 # len-=16
218 mov $h0,0($ctx) # store hash value
232 .size poly1305_blocks,.-poly1305_blocks
235 .type poly1305_emit,\@function,3
238 mov 0($ctx),%r8 # load hash value
243 add \$5,%r8 # compare to modulus
247 shr \$2,%r10 # did 130-bit value overflow?
251 add 0($nonce),%rax # accumulate nonce
253 mov %rax,0($mac) # write result
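# A sketch of the final reduction performed above (comments only):
#
#	g = h + 5;			# i.e. h - (2^130-5) + 2^130
#	h = (g >> 130) ? g : h;		# select canonical residue
#	mac = (h + nonce) mod 2^128;	# written out little-endian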
257 .size poly1305_emit,.-poly1305_emit
261 ########################################################################
262 # Layout of the opaque area is as follows.
264 # unsigned __int32 h[5]; # current hash value base 2^26
265 # unsigned __int32 is_base2_26;
266 # unsigned __int64 r[2]; # key value base 2^64
267 # unsigned __int64 pad;
268 # struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
270 # where r^n are base 2^26 digits of powers of the multiplier key. There
271 # are 5 digits, but the last four are interleaved with their multiples of
272 # 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
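# Viewed in C-like terms the table is (a sketch):
#
#	uint32_t t[9][4];	/* t[i][] = i-th element for r^2,r^1,r^4,r^3 */
#
# with t[0]=r0, t[1]=r1, t[2]=5*r1, t[3]=r2, t[4]=5*r2, ..., t[8]=5*r4,
# and the lane order within each row matching the struct declaration above.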
274 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
275 map("%xmm$_",(0..15));
278 .type __poly1305_block,\@abi-omnipotent
282 &poly1305_iteration();
285 .size __poly1305_block,.-__poly1305_block
287 .type __poly1305_init_avx,\@abi-omnipotent
294 lea 48+64($ctx),$ctx # size optimization
297 call __poly1305_block # r^2
299 mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
305 mov %eax,`16*0+0-64`($ctx)
307 mov %edx,`16*0+4-64`($ctx)
314 mov %eax,`16*1+0-64`($ctx)
315 lea (%rax,%rax,4),%eax # *5
316 mov %edx,`16*1+4-64`($ctx)
317 lea (%rdx,%rdx,4),%edx # *5
318 mov %eax,`16*2+0-64`($ctx)
320 mov %edx,`16*2+4-64`($ctx)
331 mov %eax,`16*3+0-64`($ctx)
332 lea (%rax,%rax,4),%eax # *5
333 mov %edx,`16*3+4-64`($ctx)
334 lea (%rdx,%rdx,4),%edx # *5
335 mov %eax,`16*4+0-64`($ctx)
337 mov %edx,`16*4+4-64`($ctx)
346 mov %eax,`16*5+0-64`($ctx)
347 lea (%rax,%rax,4),%eax # *5
348 mov %edx,`16*5+4-64`($ctx)
349 lea (%rdx,%rdx,4),%edx # *5
350 mov %eax,`16*6+0-64`($ctx)
352 mov %edx,`16*6+4-64`($ctx)
358 mov $d1#d,`16*7+0-64`($ctx)
359 lea ($d1,$d1,4),$d1 # *5
360 mov $d2#d,`16*7+4-64`($ctx)
361 lea ($d2,$d2,4),$d2 # *5
362 mov $d1#d,`16*8+0-64`($ctx)
363 mov $d2#d,`16*8+4-64`($ctx)
366 call __poly1305_block # r^3
368 mov \$0x3ffffff,%eax # save r^3 base 2^26
372 mov %eax,`16*0+12-64`($ctx)
376 mov %edx,`16*1+12-64`($ctx)
377 lea (%rdx,%rdx,4),%edx # *5
379 mov %edx,`16*2+12-64`($ctx)
385 mov %eax,`16*3+12-64`($ctx)
386 lea (%rax,%rax,4),%eax # *5
388 mov %eax,`16*4+12-64`($ctx)
393 mov %edx,`16*5+12-64`($ctx)
394 lea (%rdx,%rdx,4),%edx # *5
396 mov %edx,`16*6+12-64`($ctx)
401 mov $d1#d,`16*7+12-64`($ctx)
402 lea ($d1,$d1,4),$d1 # *5
403 mov $d1#d,`16*8+12-64`($ctx)
406 call __poly1305_block # r^4
408 mov \$0x3ffffff,%eax # save r^4 base 2^26
412 mov %eax,`16*0+8-64`($ctx)
416 mov %edx,`16*1+8-64`($ctx)
417 lea (%rdx,%rdx,4),%edx # *5
419 mov %edx,`16*2+8-64`($ctx)
425 mov %eax,`16*3+8-64`($ctx)
426 lea (%rax,%rax,4),%eax # *5
428 mov %eax,`16*4+8-64`($ctx)
433 mov %edx,`16*5+8-64`($ctx)
434 lea (%rdx,%rdx,4),%edx # *5
436 mov %edx,`16*6+8-64`($ctx)
441 mov $d1#d,`16*7+8-64`($ctx)
442 lea ($d1,$d1,4),$d1 # *5
443 mov $d1#d,`16*8+8-64`($ctx)
445 lea -48-64($ctx),$ctx # size [de-]optimization
447 .size __poly1305_init_avx,.-__poly1305_init_avx
449 .type poly1305_blocks_avx,\@function,4
452 mov 20($ctx),%r8d # is_base2_26
478 mov $len,%r15 # reassign $len
480 mov 0($ctx),$d1 # load hash value
484 mov 24($ctx),$r0 # load r
487 ################################# base 2^26 -> base 2^64
490 mov $d2,$r1 # borrow $r1
506 adc \$0,$h2 # can be partially reduced...
508 mov \$-4,$d2 # ... so reduce
520 add $r1,$s1 # s1 = r1 + (r1 >> 2)
522 add 0($inp),$h0 # accumulate input
527 call __poly1305_block
529 test $padbit,$padbit # if $padbit is zero,
530 jz .Lstore_base2_64_avx # store hash in base 2^64 format
532 ################################# base 2^64 -> base 2^26
539 and \$0x3ffffff,%rax # h[0]
541 and \$0x3ffffff,%rdx # h[1]
545 and \$0x3ffffff,$h0 # h[2]
547 and \$0x3ffffff,$h1 # h[3]
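# The full radix conversion is (a sketch, masks and shifts only):
#
#	h[0] =   h0 & 0x3ffffff;			# bits   0..25
#	h[1] =  (h0 >> 26) & 0x3ffffff;			# bits  26..51
#	h[2] = ((h0 >> 52) | (h1 << 12)) & 0x3ffffff;	# bits  52..77
#	h[3] =  (h1 >> 14) & 0x3ffffff;			# bits  78..103
#	h[4] =  (h1 >> 40) | (h2 << 24);		# bits 104..129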
551 jz .Lstore_base2_26_avx
561 .Lstore_base2_64_avx:
564 mov $h2,16($ctx) # note that is_base2_26 is zeroed
568 .Lstore_base2_26_avx:
569 mov %rax#d,0($ctx) # store hash value base 2^26
584 .Lblocks_avx_epilogue:
597 mov $len,%r15 # reassign $len
599 mov 24($ctx),$r0 # load r
602 mov 0($ctx),$h0 # load hash value
609 add $r1,$s1 # s1 = r1 + (r1 >> 2)
614 add 0($inp),$h0 # accumulate input
620 call __poly1305_block
623 ################################# base 2^64 -> base 2^26
630 and \$0x3ffffff,%rax # h[0]
632 and \$0x3ffffff,%rdx # h[1]
636 and \$0x3ffffff,$h0 # h[2]
638 and \$0x3ffffff,$h1 # h[3]
646 movl \$1,20($ctx) # set is_base2_26
648 call __poly1305_init_avx
661 .Lbase2_64_avx_epilogue:
666 vmovd 4*0($ctx),$H0 # load hash value
674 $code.=<<___ if (!$win64);
678 $code.=<<___ if ($win64);
681 vmovdqa %xmm6,0x50(%r11)
682 vmovdqa %xmm7,0x60(%r11)
683 vmovdqa %xmm8,0x70(%r11)
684 vmovdqa %xmm9,0x80(%r11)
685 vmovdqa %xmm10,0x90(%r11)
686 vmovdqa %xmm11,0xa0(%r11)
687 vmovdqa %xmm12,0xb0(%r11)
688 vmovdqa %xmm13,0xc0(%r11)
689 vmovdqa %xmm14,0xd0(%r11)
690 vmovdqa %xmm15,0xe0(%r11)
698 vmovdqu `16*3`($ctx),$D4 # preload r0^2
699 lea `16*3+64`($ctx),$ctx # size optimization
700 lea .Lconst(%rip),%rcx
702 ################################################################
704 vmovdqu 16*2($inp),$T0
705 vmovdqu 16*3($inp),$T1
706 vmovdqa 64(%rcx),$MASK # .Lmask26
708 vpsrldq \$6,$T0,$T2 # splat input
710 vpunpckhqdq $T1,$T0,$T4 # 4
711 vpunpcklqdq $T1,$T0,$T0 # 0:1
712 vpunpcklqdq $T3,$T2,$T3 # 2:3
714 vpsrlq \$40,$T4,$T4 # 4
716 vpand $MASK,$T0,$T0 # 0
718 vpand $MASK,$T1,$T1 # 1
720 vpand $MASK,$T2,$T2 # 2
721 vpand $MASK,$T3,$T3 # 3
722 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
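# (32(%rcx) holds the 1<<24 constant: digit 4 covers bits 104..129,
# so setting its bit 24 adds 2^128, the pad bit that caps every full
# 16-byte block.)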
726 # expand and copy pre-calculated table to stack
727 vmovdqu `16*1-64`($ctx),$D1
728 vmovdqu `16*2-64`($ctx),$D2
729 vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
730 vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
731 vmovdqa $D3,-0x90(%r11)
732 vmovdqa $D0,0x00(%rsp)
733 vpshufd \$0xEE,$D1,$D4
734 vmovdqu `16*3-64`($ctx),$D0
735 vpshufd \$0x44,$D1,$D1
736 vmovdqa $D4,-0x80(%r11)
737 vmovdqa $D1,0x10(%rsp)
738 vpshufd \$0xEE,$D2,$D3
739 vmovdqu `16*4-64`($ctx),$D1
740 vpshufd \$0x44,$D2,$D2
741 vmovdqa $D3,-0x70(%r11)
742 vmovdqa $D2,0x20(%rsp)
743 vpshufd \$0xEE,$D0,$D4
744 vmovdqu `16*5-64`($ctx),$D2
745 vpshufd \$0x44,$D0,$D0
746 vmovdqa $D4,-0x60(%r11)
747 vmovdqa $D0,0x30(%rsp)
748 vpshufd \$0xEE,$D1,$D3
749 vmovdqu `16*6-64`($ctx),$D0
750 vpshufd \$0x44,$D1,$D1
751 vmovdqa $D3,-0x50(%r11)
752 vmovdqa $D1,0x40(%rsp)
753 vpshufd \$0xEE,$D2,$D4
754 vmovdqu `16*7-64`($ctx),$D1
755 vpshufd \$0x44,$D2,$D2
756 vmovdqa $D4,-0x40(%r11)
757 vmovdqa $D2,0x50(%rsp)
758 vpshufd \$0xEE,$D0,$D3
759 vmovdqu `16*8-64`($ctx),$D2
760 vpshufd \$0x44,$D0,$D0
761 vmovdqa $D3,-0x30(%r11)
762 vmovdqa $D0,0x60(%rsp)
763 vpshufd \$0xEE,$D1,$D4
764 vpshufd \$0x44,$D1,$D1
765 vmovdqa $D4,-0x20(%r11)
766 vmovdqa $D1,0x70(%rsp)
767 vpshufd \$0xEE,$D2,$D3
768 vmovdqa 0x00(%rsp),$D4 # preload r0^2
769 vpshufd \$0x44,$D2,$D2
770 vmovdqa $D3,-0x10(%r11)
771 vmovdqa $D2,0x80(%rsp)
777 ################################################################
778 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
779 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
780 # \___________________/
781 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
782 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
783 # \___________________/ \____________________/
785 # Note that we start with inp[2:3]*r^2. This is because it
786 # doesn't depend on the reduction in the previous iteration.
787 ################################################################
788 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
789 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
790 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
791 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
792 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
794 # though note that $Tx and $Hx are "reversed" in this section,
795 # and $D4 is preloaded with r0^2...
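# The formulas follow from schoolbook multiplication in base 2^26:
# cross terms h_i*r_j with i+j >= 5 land at 2^(26*(i+j)) >= 2^130 and
# fold back multiplied by 5, since 2^130 = 5 (mod 2^130-5), hence the
# precomputed s_j = 5*r_j multiples, e.g. (a sketch):
#
#	d0 = h0*r0 + h4*s1 + h3*s2 + h2*s3 + h1*s4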
797 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
798 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
799 vmovdqa $H2,0x20(%r11) # offload hash
800 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
801 vmovdqa 0x10(%rsp),$H2 # r1^2
802 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
803 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
805 vmovdqa $H0,0x00(%r11) #
806 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
807 vmovdqa $H1,0x10(%r11) #
808 vpmuludq $T3,$H2,$H1 # h3*r1
809 vpaddq $H0,$D0,$D0 # d0 += h4*s1
810 vpaddq $H1,$D4,$D4 # d4 += h3*r1
811 vmovdqa $H3,0x30(%r11) #
812 vpmuludq $T2,$H2,$H0 # h2*r1
813 vpmuludq $T1,$H2,$H1 # h1*r1
814 vpaddq $H0,$D3,$D3 # d3 += h2*r1
815 vmovdqa 0x30(%rsp),$H3 # r2^2
816 vpaddq $H1,$D2,$D2 # d2 += h1*r1
817 vmovdqa $H4,0x40(%r11) #
818 vpmuludq $T0,$H2,$H2 # h0*r1
819 vpmuludq $T2,$H3,$H0 # h2*r2
820 vpaddq $H2,$D1,$D1 # d1 += h0*r1
822 vmovdqa 0x40(%rsp),$H4 # s2^2
823 vpaddq $H0,$D4,$D4 # d4 += h2*r2
824 vpmuludq $T1,$H3,$H1 # h1*r2
825 vpmuludq $T0,$H3,$H3 # h0*r2
826 vpaddq $H1,$D3,$D3 # d3 += h1*r2
827 vmovdqa 0x50(%rsp),$H2 # r3^2
828 vpaddq $H3,$D2,$D2 # d2 += h0*r2
829 vpmuludq $T4,$H4,$H0 # h4*s2
830 vpmuludq $T3,$H4,$H4 # h3*s2
831 vpaddq $H0,$D1,$D1 # d1 += h4*s2
832 vmovdqa 0x60(%rsp),$H3 # s3^2
833 vpaddq $H4,$D0,$D0 # d0 += h3*s2
835 vmovdqa 0x80(%rsp),$H4 # s4^2
836 vpmuludq $T1,$H2,$H1 # h1*r3
837 vpmuludq $T0,$H2,$H2 # h0*r3
838 vpaddq $H1,$D4,$D4 # d4 += h1*r3
839 vpaddq $H2,$D3,$D3 # d3 += h0*r3
840 vpmuludq $T4,$H3,$H0 # h4*s3
841 vpmuludq $T3,$H3,$H1 # h3*s3
842 vpaddq $H0,$D2,$D2 # d2 += h4*s3
843 vmovdqu 16*0($inp),$H0 # load input
844 vpaddq $H1,$D1,$D1 # d1 += h3*s3
845 vpmuludq $T2,$H3,$H3 # h2*s3
846 vpmuludq $T2,$H4,$T2 # h2*s4
847 vpaddq $H3,$D0,$D0 # d0 += h2*s3
849 vmovdqu 16*1($inp),$H1 #
850 vpaddq $T2,$D1,$D1 # d1 += h2*s4
851 vpmuludq $T3,$H4,$T3 # h3*s4
852 vpmuludq $T4,$H4,$T4 # h4*s4
853 vpsrldq \$6,$H0,$H2 # splat input
854 vpaddq $T3,$D2,$D2 # d2 += h3*s4
855 vpaddq $T4,$D3,$D3 # d3 += h4*s4
856 vpsrldq \$6,$H1,$H3 #
857 vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
858 vpmuludq $T1,$H4,$T0 # h1*s4
859 vpunpckhqdq $H1,$H0,$H4 # 4
860 vpaddq $T4,$D4,$D4 # d4 += h0*r4
861 vmovdqa -0x90(%r11),$T4 # r0^4
862 vpaddq $T0,$D0,$D0 # d0 += h1*s4
864 vpunpcklqdq $H1,$H0,$H0 # 0:1
865 vpunpcklqdq $H3,$H2,$H3 # 2:3
867 #vpsrlq \$40,$H4,$H4 # 4
868 vpsrldq \$`40/8`,$H4,$H4 # 4
870 vpand $MASK,$H0,$H0 # 0
872 vpand $MASK,$H1,$H1 # 1
873 vpand 0(%rcx),$H4,$H4 # .Lmask24
875 vpand $MASK,$H2,$H2 # 2
876 vpand $MASK,$H3,$H3 # 3
877 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
879 vpaddq 0x00(%r11),$H0,$H0 # add hash value
880 vpaddq 0x10(%r11),$H1,$H1
881 vpaddq 0x20(%r11),$H2,$H2
882 vpaddq 0x30(%r11),$H3,$H3
883 vpaddq 0x40(%r11),$H4,$H4
890 ################################################################
891 # Now we accumulate (inp[0:1]+hash)*r^4
892 ################################################################
893 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
894 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
895 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
896 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
897 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
899 vpmuludq $H0,$T4,$T0 # h0*r0
900 vpmuludq $H1,$T4,$T1 # h1*r0
903 vmovdqa -0x80(%r11),$T2 # r1^4
904 vpmuludq $H2,$T4,$T0 # h2*r0
905 vpmuludq $H3,$T4,$T1 # h3*r0
908 vpmuludq $H4,$T4,$T4 # h4*r0
909 vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
912 vpaddq $T0,$D0,$D0 # d0 += h4*s1
913 vpmuludq $H2,$T2,$T1 # h2*r1
914 vpmuludq $H3,$T2,$T0 # h3*r1
915 vpaddq $T1,$D3,$D3 # d3 += h2*r1
916 vmovdqa -0x60(%r11),$T3 # r2^4
917 vpaddq $T0,$D4,$D4 # d4 += h3*r1
918 vpmuludq $H1,$T2,$T1 # h1*r1
919 vpmuludq $H0,$T2,$T2 # h0*r1
920 vpaddq $T1,$D2,$D2 # d2 += h1*r1
921 vpaddq $T2,$D1,$D1 # d1 += h0*r1
923 vmovdqa -0x50(%r11),$T4 # s2^4
924 vpmuludq $H2,$T3,$T0 # h2*r2
925 vpmuludq $H1,$T3,$T1 # h1*r2
926 vpaddq $T0,$D4,$D4 # d4 += h2*r2
927 vpaddq $T1,$D3,$D3 # d3 += h1*r2
928 vmovdqa -0x40(%r11),$T2 # r3^4
929 vpmuludq $H0,$T3,$T3 # h0*r2
930 vpmuludq $H4,$T4,$T0 # h4*s2
931 vpaddq $T3,$D2,$D2 # d2 += h0*r2
932 vpaddq $T0,$D1,$D1 # d1 += h4*s2
933 vmovdqa -0x30(%r11),$T3 # s3^4
934 vpmuludq $H3,$T4,$T4 # h3*s2
935 vpmuludq $H1,$T2,$T1 # h1*r3
936 vpaddq $T4,$D0,$D0 # d0 += h3*s2
938 vmovdqa -0x10(%r11),$T4 # s4^4
939 vpaddq $T1,$D4,$D4 # d4 += h1*r3
940 vpmuludq $H0,$T2,$T2 # h0*r3
941 vpmuludq $H4,$T3,$T0 # h4*s3
942 vpaddq $T2,$D3,$D3 # d3 += h0*r3
943 vpaddq $T0,$D2,$D2 # d2 += h4*s3
944 vmovdqu 16*2($inp),$T0 # load input
945 vpmuludq $H3,$T3,$T2 # h3*s3
946 vpmuludq $H2,$T3,$T3 # h2*s3
947 vpaddq $T2,$D1,$D1 # d1 += h3*s3
948 vmovdqu 16*3($inp),$T1 #
949 vpaddq $T3,$D0,$D0 # d0 += h2*s3
951 vpmuludq $H2,$T4,$H2 # h2*s4
952 vpmuludq $H3,$T4,$H3 # h3*s4
953 vpsrldq \$6,$T0,$T2 # splat input
954 vpaddq $H2,$D1,$D1 # d1 += h2*s4
955 vpmuludq $H4,$T4,$H4 # h4*s4
956 vpsrldq \$6,$T1,$T3 #
957 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
958 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
959 vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
961 vpunpckhqdq $T1,$T0,$T4 # 4
962 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
963 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
965 vpunpcklqdq $T1,$T0,$T0 # 0:1
966 vpunpcklqdq $T3,$T2,$T3 # 2:3
968 #vpsrlq \$40,$T4,$T4 # 4
969 vpsrldq \$`40/8`,$T4,$T4 # 4
971 vmovdqa 0x00(%rsp),$D4 # preload r0^2
972 vpand $MASK,$T0,$T0 # 0
974 vpand $MASK,$T1,$T1 # 1
975 vpand 0(%rcx),$T4,$T4 # .Lmask24
977 vpand $MASK,$T2,$T2 # 2
978 vpand $MASK,$T3,$T3 # 3
979 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
981 ################################################################
982 # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
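# A sketch of one carry pass (26-bit limbs, carries deferred until all
# five products are accumulated; the actual schedule is interleaved):
#
#	c = d3 >> 26; d3 &= 0x3ffffff; d4 += c;	# h3 -> h4
#	c = d4 >> 26; d4 &= 0x3ffffff; d0 += 5*c;	# h4 -> h0
#	c = d0 >> 26; d0 &= 0x3ffffff; d1 += c;		# h0 -> h1
#	... and so on for the remaining limbs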
987 vpaddq $D3,$H4,$H4 # h3 -> h4
991 vpaddq $D0,$D1,$H1 # h0 -> h1
998 vpaddq $D1,$H2,$H2 # h1 -> h2
1002 vpaddq $D0,$H0,$H0 # h4 -> h0
1006 vpaddq $D2,$H3,$H3 # h2 -> h3
1010 vpaddq $D0,$H1,$H1 # h0 -> h1
1014 vpaddq $D3,$H4,$H4 # h3 -> h4
1019 ################################################################
1020 # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1022 vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
1033 vmovdqa $H2,0x20(%r11)
1034 vmovdqa $H0,0x00(%r11)
1035 vmovdqa $H1,0x10(%r11)
1036 vmovdqa $H3,0x30(%r11)
1037 vmovdqa $H4,0x40(%r11)
1039 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1040 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1041 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1042 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1043 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1045 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
1046 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
1047 vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
1048 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
1049 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
1050 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
1052 vpmuludq $T3,$H2,$H0 # h3*r1
1053 vpaddq $H0,$D4,$D4 # d4 += h3*r1
1054 vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
1055 vpmuludq $T2,$H2,$H1 # h2*r1
1056 vpaddq $H1,$D3,$D3 # d3 += h2*r1
1057 vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
1058 vpmuludq $T1,$H2,$H0 # h1*r1
1059 vpaddq $H0,$D2,$D2 # d2 += h1*r1
1060 vpmuludq $T0,$H2,$H2 # h0*r1
1061 vpaddq $H2,$D1,$D1 # d1 += h0*r1
1062 vpmuludq $T4,$H3,$H3 # h4*s1
1063 vpaddq $H3,$D0,$D0 # d0 += h4*s1
1065 vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
1066 vpmuludq $T2,$H4,$H1 # h2*r2
1067 vpaddq $H1,$D4,$D4 # d4 += h2*r2
1068 vpmuludq $T1,$H4,$H0 # h1*r2
1069 vpaddq $H0,$D3,$D3 # d3 += h1*r2
1070 vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
1071 vpmuludq $T0,$H4,$H4 # h0*r2
1072 vpaddq $H4,$D2,$D2 # d2 += h0*r2
1073 vpmuludq $T4,$H2,$H1 # h4*s2
1074 vpaddq $H1,$D1,$D1 # d1 += h4*s2
1075 vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
1076 vpmuludq $T3,$H2,$H2 # h3*s2
1077 vpaddq $H2,$D0,$D0 # d0 += h3*s2
1079 vpmuludq $T1,$H3,$H0 # h1*r3
1080 vpaddq $H0,$D4,$D4 # d4 += h1*r3
1081 vpmuludq $T0,$H3,$H3 # h0*r3
1082 vpaddq $H3,$D3,$D3 # d3 += h0*r3
1083 vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
1084 vpmuludq $T4,$H4,$H1 # h4*s3
1085 vpaddq $H1,$D2,$D2 # d2 += h4*s3
1086 vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
1087 vpmuludq $T3,$H4,$H0 # h3*s3
1088 vpaddq $H0,$D1,$D1 # d1 += h3*s3
1089 vpmuludq $T2,$H4,$H4 # h2*s3
1090 vpaddq $H4,$D0,$D0 # d0 += h2*s3
1092 vpmuludq $T0,$H2,$H2 # h0*r4
1093 vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
1094 vpmuludq $T4,$H3,$H1 # h4*s4
1095 vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
1096 vpmuludq $T3,$H3,$H0 # h3*s4
1097 vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
1098 vpmuludq $T2,$H3,$H1 # h2*s4
1099 vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
1100 vpmuludq $T1,$H3,$H3 # h1*s4
1101 vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4
1105 vmovdqu 16*0($inp),$H0 # load input
1106 vmovdqu 16*1($inp),$H1
1108 vpsrldq \$6,$H0,$H2 # splat input
1110 vpunpckhqdq $H1,$H0,$H4 # 4
1111 vpunpcklqdq $H1,$H0,$H0 # 0:1
1112 vpunpcklqdq $H3,$H2,$H3 # 2:3
1114 vpsrlq \$40,$H4,$H4 # 4
1116 vpand $MASK,$H0,$H0 # 0
1118 vpand $MASK,$H1,$H1 # 1
1120 vpand $MASK,$H2,$H2 # 2
1121 vpand $MASK,$H3,$H3 # 3
1122 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
1124 vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
1125 vpaddq 0x00(%r11),$H0,$H0
1126 vpaddq 0x10(%r11),$H1,$H1
1127 vpaddq 0x20(%r11),$H2,$H2
1128 vpaddq 0x30(%r11),$H3,$H3
1129 vpaddq 0x40(%r11),$H4,$H4
1131 ################################################################
1132 # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1134 vpmuludq $H0,$T4,$T0 # h0*r0
1135 vpaddq $T0,$D0,$D0 # d0 += h0*r0
1136 vpmuludq $H1,$T4,$T1 # h1*r0
1137 vpaddq $T1,$D1,$D1 # d1 += h1*r0
1138 vpmuludq $H2,$T4,$T0 # h2*r0
1139 vpaddq $T0,$D2,$D2 # d2 += h2*r0
1140 vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
1141 vpmuludq $H3,$T4,$T1 # h3*r0
1142 vpaddq $T1,$D3,$D3 # d3 += h3*r0
1143 vpmuludq $H4,$T4,$T4 # h4*r0
1144 vpaddq $T4,$D4,$D4 # d4 += h4*r0
1146 vpmuludq $H3,$T2,$T0 # h3*r1
1147 vpaddq $T0,$D4,$D4 # d4 += h3*r1
1148 vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
1149 vpmuludq $H2,$T2,$T1 # h2*r1
1150 vpaddq $T1,$D3,$D3 # d3 += h2*r1
1151 vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
1152 vpmuludq $H1,$T2,$T0 # h1*r1
1153 vpaddq $T0,$D2,$D2 # d2 += h1*r1
1154 vpmuludq $H0,$T2,$T2 # h0*r1
1155 vpaddq $T2,$D1,$D1 # d1 += h0*r1
1156 vpmuludq $H4,$T3,$T3 # h4*s1
1157 vpaddq $T3,$D0,$D0 # d0 += h4*s1
1159 vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
1160 vpmuludq $H2,$T4,$T1 # h2*r2
1161 vpaddq $T1,$D4,$D4 # d4 += h2*r2
1162 vpmuludq $H1,$T4,$T0 # h1*r2
1163 vpaddq $T0,$D3,$D3 # d3 += h1*r2
1164 vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
1165 vpmuludq $H0,$T4,$T4 # h0*r2
1166 vpaddq $T4,$D2,$D2 # d2 += h0*r2
1167 vpmuludq $H4,$T2,$T1 # h4*s2
1168 vpaddq $T1,$D1,$D1 # d1 += h4*s2
1169 vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
1170 vpmuludq $H3,$T2,$T2 # h3*s2
1171 vpaddq $T2,$D0,$D0 # d0 += h3*s2
1173 vpmuludq $H1,$T3,$T0 # h1*r3
1174 vpaddq $T0,$D4,$D4 # d4 += h1*r3
1175 vpmuludq $H0,$T3,$T3 # h0*r3
1176 vpaddq $T3,$D3,$D3 # d3 += h0*r3
1177 vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
1178 vpmuludq $H4,$T4,$T1 # h4*s3
1179 vpaddq $T1,$D2,$D2 # d2 += h4*s3
1180 vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
1181 vpmuludq $H3,$T4,$T0 # h3*s3
1182 vpaddq $T0,$D1,$D1 # d1 += h3*s3
1183 vpmuludq $H2,$T4,$T4 # h2*s3
1184 vpaddq $T4,$D0,$D0 # d0 += h2*s3
1186 vpmuludq $H0,$T2,$T2 # h0*r4
1187 vpaddq $T2,$D4,$D4 # d4 += h0*r4
1188 vpmuludq $H4,$T3,$T1 # h4*s4
1189 vpaddq $T1,$D3,$D3 # d3 += h4*s4
1190 vpmuludq $H3,$T3,$T0 # h3*s4
1191 vpaddq $T0,$D2,$D2 # d2 += h3*s4
1192 vpmuludq $H2,$T3,$T1 # h2*s4
1193 vpaddq $T1,$D1,$D1 # d1 += h2*s4
1194 vpmuludq $H1,$T3,$T3 # h1*s4
1195 vpaddq $T3,$D0,$D0 # d0 += h1*s4
1198 ################################################################
1203 vpaddq $H3,$D4,$D4 # h3 -> h4
1207 vpaddq $H0,$D1,$D1 # h0 -> h1
1214 vpaddq $H1,$D2,$D2 # h1 -> h2
1218 vpaddq $H4,$D0,$D0 # h4 -> h0
1222 vpaddq $H2,$D3,$D3 # h2 -> h3
1226 vpaddq $H0,$D1,$D1 # h0 -> h1
1230 vpaddq $H3,$D4,$D4 # h3 -> h4
1232 ################################################################
1233 # horizontal addition
1246 vmovd $H0,`4*0-48-64`($ctx) # save partially reduced
1247 vmovd $H1,`4*1-48-64`($ctx)
1248 vmovd $H2,`4*2-48-64`($ctx)
1249 vmovd $H3,`4*3-48-64`($ctx)
1250 vmovd $H4,`4*4-48-64`($ctx)
1252 $code.=<<___ if ($win64);
1253 vmovdqa 0x50(%r11),%xmm6
1254 vmovdqa 0x60(%r11),%xmm7
1255 vmovdqa 0x70(%r11),%xmm8
1256 vmovdqa 0x80(%r11),%xmm9
1257 vmovdqa 0x90(%r11),%xmm10
1258 vmovdqa 0xa0(%r11),%xmm11
1259 vmovdqa 0xb0(%r11),%xmm12
1260 vmovdqa 0xc0(%r11),%xmm13
1261 vmovdqa 0xd0(%r11),%xmm14
1262 vmovdqa 0xe0(%r11),%xmm15
1266 $code.=<<___ if (!$win64);
1272 .size poly1305_blocks_avx,.-poly1305_blocks_avx
1274 .type poly1305_emit_avx,\@function,3
1277 cmpl \$0,20($ctx) # is_base2_26?
1280 mov 0($ctx),%eax # load hash value base 2^26
1286 shl \$26,%rcx # base 2^26 -> base 2^64
1302 mov %r10,%rax # could be partially reduced, so reduce
1312 add \$5,%r8 # compare to modulus
1316 shr \$2,%r10 # did 130-bit value overflow?
1320 add 0($nonce),%rax # accumulate nonce
1322 mov %rax,0($mac) # write result
1326 .size poly1305_emit_avx,.-poly1305_emit_avx
1330 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1331 map("%ymm$_",(0..15));
1335 .type poly1305_blocks_avx2,\@function,4
1337 poly1305_blocks_avx2:
1338 mov 20($ctx),%r8d # is_base2_26
1364 mov $len,%r15 # reassign $len
1366 mov 0($ctx),$d1 # load hash value
1370 mov 24($ctx),$r0 # load r
1373 ################################# base 2^26 -> base 2^64
1376 mov $d2,$r1 # borrow $r1
1392 adc \$0,$h2 # can be partially reduced...
1394 mov \$-4,$d2 # ... so reduce
1406 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1408 .Lbase2_26_pre_avx2:
1409 add 0($inp),$h0 # accumulate input
1415 call __poly1305_block
1419 jnz .Lbase2_26_pre_avx2
1421 test $padbit,$padbit # if $padbit is zero,
1422 jz .Lstore_base2_64_avx2 # store hash in base 2^64 format
1424 ################################# base 2^64 -> base 2^26
1431 and \$0x3ffffff,%rax # h[0]
1433 and \$0x3ffffff,%rdx # h[1]
1437 and \$0x3ffffff,$h0 # h[2]
1439 and \$0x3ffffff,$h1 # h[3]
1443 jz .Lstore_base2_26_avx2
1453 .Lstore_base2_64_avx2:
1456 mov $h2,16($ctx) # note that is_base2_26 is zeroed
1460 .Lstore_base2_26_avx2:
1461 mov %rax#d,0($ctx) # store hash value base 2^26
1476 .Lblocks_avx2_epilogue:
1487 .Lbase2_64_avx2_body:
1489 mov $len,%r15 # reassign $len
1491 mov 24($ctx),$r0 # load r
1494 mov 0($ctx),$h0 # load hash value
1501 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1506 .Lbase2_64_pre_avx2:
1507 add 0($inp),$h0 # accumulate input
1513 call __poly1305_block
1517 jnz .Lbase2_64_pre_avx2
1520 ################################# base 2^64 -> base 2^26
1527 and \$0x3ffffff,%rax # h[0]
1529 and \$0x3ffffff,%rdx # h[1]
1533 and \$0x3ffffff,$h0 # h[2]
1535 and \$0x3ffffff,$h1 # h[3]
1543 movl \$1,20($ctx) # set is_base2_26
1545 call __poly1305_init_avx
1558 .Lbase2_64_avx2_epilogue:
1563 vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
1564 vmovd 4*1($ctx),%x#$H1
1565 vmovd 4*2($ctx),%x#$H2
1566 vmovd 4*3($ctx),%x#$H3
1567 vmovd 4*4($ctx),%x#$H4
1571 $code.=<<___ if (!$win64);
1575 $code.=<<___ if ($win64);
1576 lea -0xf8(%rsp),%r11
1578 vmovdqa %xmm6,0x50(%r11)
1579 vmovdqa %xmm7,0x60(%r11)
1580 vmovdqa %xmm8,0x70(%r11)
1581 vmovdqa %xmm9,0x80(%r11)
1582 vmovdqa %xmm10,0x90(%r11)
1583 vmovdqa %xmm11,0xa0(%r11)
1584 vmovdqa %xmm12,0xb0(%r11)
1585 vmovdqa %xmm13,0xc0(%r11)
1586 vmovdqa %xmm14,0xd0(%r11)
1587 vmovdqa %xmm15,0xe0(%r11)
1591 lea 48+64($ctx),$ctx # size optimization
1592 lea .Lconst(%rip),%rcx
1594 # expand and copy pre-calculated table to stack
1595 vmovdqu `16*0-64`($ctx),%x#$T2
1597 vmovdqu `16*1-64`($ctx),%x#$T3
1598 vmovdqu `16*2-64`($ctx),%x#$T4
1599 vmovdqu `16*3-64`($ctx),%x#$D0
1600 vmovdqu `16*4-64`($ctx),%x#$D1
1601 vmovdqu `16*5-64`($ctx),%x#$D2
1602 vmovdqu `16*6-64`($ctx),%x#$D3
1603 vpermq \$0x15,$T2,$T2 # 00003412 -> 12343434
1604 vmovdqu `16*7-64`($ctx),%x#$D4
1605 vpermq \$0x15,$T3,$T3
1606 vpshufd \$0xc8,$T2,$T2 # 12343434 -> 14243444
1607 vmovdqu `16*8-64`($ctx),%x#$MASK
1608 vpermq \$0x15,$T4,$T4
1609 vpshufd \$0xc8,$T3,$T3
1610 vmovdqa $T2,0x00(%rsp)
1611 vpermq \$0x15,$D0,$D0
1612 vpshufd \$0xc8,$T4,$T4
1613 vmovdqa $T3,0x20(%rsp)
1614 vpermq \$0x15,$D1,$D1
1615 vpshufd \$0xc8,$D0,$D0
1616 vmovdqa $T4,0x40(%rsp)
1617 vpermq \$0x15,$D2,$D2
1618 vpshufd \$0xc8,$D1,$D1
1619 vmovdqa $D0,0x60(%rsp)
1620 vpermq \$0x15,$D3,$D3
1621 vpshufd \$0xc8,$D2,$D2
1622 vmovdqa $D1,0x80(%rsp)
1623 vpermq \$0x15,$D4,$D4
1624 vpshufd \$0xc8,$D3,$D3
1625 vmovdqa $D2,0xa0(%rsp)
1626 vpermq \$0x15,$MASK,$MASK
1627 vpshufd \$0xc8,$D4,$D4
1628 vmovdqa $D3,0xc0(%rsp)
1629 vpshufd \$0xc8,$MASK,$MASK
1630 vmovdqa $D4,0xe0(%rsp)
1631 vmovdqa $MASK,0x100(%rsp)
1632 vmovdqa 64(%rcx),$MASK # .Lmask26
1634 ################################################################
1636 vmovdqu 16*0($inp),%x#$T0
1637 vmovdqu 16*1($inp),%x#$T1
1638 vinserti128 \$1,16*2($inp),$T0,$T0
1639 vinserti128 \$1,16*3($inp),$T1,$T1
1642 vpsrldq \$6,$T0,$T2 # splat input
1644 vpunpckhqdq $T1,$T0,$T4 # 4
1645 vpunpcklqdq $T3,$T2,$T2 # 2:3
1646 vpunpcklqdq $T1,$T0,$T0 # 0:1
1651 vpsrlq \$40,$T4,$T4 # 4
1652 vpand $MASK,$T2,$T2 # 2
1653 vpand $MASK,$T0,$T0 # 0
1654 vpand $MASK,$T1,$T1 # 1
1655 vpand $MASK,$T3,$T3 # 3
1656 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1658 lea 0x90(%rsp),%rax # size optimization
1659 vpaddq $H2,$T2,$H2 # accumulate input
1666 ################################################################
1667 # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1668 # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1669 # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1670 # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1671 # \________/\________/
1672 ################################################################
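# With four 64-bit lanes, lane j accumulates blocks j, j+4, j+8, ...;
# each iteration multiplies all lanes by r^4, and the final combine
# multiplies the lanes by r^4, r^3, r^2, r^1 respectively before
# adding them together (see the tail further down).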
1673 #vpaddq $H2,$T2,$H2 # accumulate input
1675 vmovdqa `32*0`(%rsp),$T0 # r0^4
1677 vmovdqa `32*1`(%rsp),$T1 # r1^4
1679 vmovdqa `32*3`(%rsp),$T2 # r2^4
1681 vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
1682 vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
1684 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1685 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1686 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1687 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1688 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1690 # however, as h2 is "chronologically" the first one available, pull the
1691 # corresponding operations up, so it becomes
1693 # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
1694 # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
1695 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1696 # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
1697 # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
1699 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1700 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1701 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1702 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1703 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1705 vpmuludq $H0,$T1,$T4 # h0*r1
1706 vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
1707 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1708 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1709 vpmuludq $H3,$T1,$T4 # h3*r1
1710 vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
1711 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1712 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1713 vmovdqa `32*4-0x90`(%rax),$T1 # s2
1715 vpmuludq $H0,$T0,$T4 # h0*r0
1716 vpmuludq $H1,$T0,$H2 # h1*r0
1717 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1718 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1719 vpmuludq $H3,$T0,$T4 # h3*r0
1720 vpmuludq $H4,$T0,$H2 # h4*r0
1721 vmovdqu 16*0($inp),%x#$T0 # load input
1722 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1723 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1724 vinserti128 \$1,16*2($inp),$T0,$T0
1726 vpmuludq $H3,$T1,$T4 # h3*s2
1727 vpmuludq $H4,$T1,$H2 # h4*s2
1728 vmovdqu 16*1($inp),%x#$T1
1729 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1730 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1731 vmovdqa `32*5-0x90`(%rax),$H2 # r3
1732 vpmuludq $H1,$T2,$T4 # h1*r2
1733 vpmuludq $H0,$T2,$T2 # h0*r2
1734 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1735 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1736 vinserti128 \$1,16*3($inp),$T1,$T1
1739 vpmuludq $H1,$H2,$T4 # h1*r3
1740 vpmuludq $H0,$H2,$H2 # h0*r3
1741 vpsrldq \$6,$T0,$T2 # splat input
1742 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1743 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1744 vpmuludq $H3,$T3,$T4 # h3*s3
1745 vpmuludq $H4,$T3,$H2 # h4*s3
1747 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1748 vpaddq $H2,$D2,$D2 # d2 += h4*s3
1749 vpunpckhqdq $T1,$T0,$T4 # 4
1751 vpmuludq $H3,$S4,$H3 # h3*s4
1752 vpmuludq $H4,$S4,$H4 # h4*s4
1753 vpunpcklqdq $T1,$T0,$T0 # 0:1
1754 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1755 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1756 vpunpcklqdq $T3,$T2,$T3 # 2:3
1757 vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
1758 vpmuludq $H1,$S4,$H0 # h1*s4
1759 vmovdqa 64(%rcx),$MASK # .Lmask26
1760 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1761 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1763 ################################################################
1764 # lazy reduction (interleaved with tail of input splat)
1768 vpaddq $D3,$H4,$H4 # h3 -> h4
1772 vpaddq $D0,$D1,$H1 # h0 -> h1
1781 vpaddq $D1,$H2,$H2 # h1 -> h2
1785 vpaddq $D4,$H0,$H0 # h4 -> h0
1787 vpand $MASK,$T2,$T2 # 2
1792 vpaddq $D2,$H3,$H3 # h2 -> h3
1794 vpaddq $T2,$H2,$H2 # modulo-scheduled
1799 vpaddq $D0,$H1,$H1 # h0 -> h1
1801 vpsrlq \$40,$T4,$T4 # 4
1805 vpaddq $D3,$H4,$H4 # h3 -> h4
1807 vpand $MASK,$T0,$T0 # 0
1808 vpand $MASK,$T1,$T1 # 1
1809 vpand $MASK,$T3,$T3 # 3
1810 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1817 ################################################################
1818 # while the above multiplications were by r^4 in all lanes, in the last
1819 # iteration we multiply the least significant lane by r^4 and the most
1820 # significant one by r, so this is a copy of the above except that
1821 # references to the precomputed table are displaced by 4...
1823 #vpaddq $H2,$T2,$H2 # accumulate input
1825 vmovdqu `32*0+4`(%rsp),$T0 # r0^4
1827 vmovdqu `32*1+4`(%rsp),$T1 # r1^4
1829 vmovdqu `32*3+4`(%rsp),$T2 # r2^4
1831 vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
1832 vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
1834 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1835 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1836 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1837 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1838 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1840 vpmuludq $H0,$T1,$T4 # h0*r1
1841 vpmuludq $H1,$T1,$H2 # h1*r1
1842 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1843 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1844 vpmuludq $H3,$T1,$T4 # h3*r1
1845 vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
1846 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1847 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1849 vpmuludq $H0,$T0,$T4 # h0*r0
1850 vpmuludq $H1,$T0,$H2 # h1*r0
1851 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1852 vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
1853 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1854 vpmuludq $H3,$T0,$T4 # h3*r0
1855 vpmuludq $H4,$T0,$H2 # h4*r0
1856 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1857 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1859 vpmuludq $H3,$T1,$T4 # h3*s2
1860 vpmuludq $H4,$T1,$H2 # h4*s2
1861 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1862 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1863 vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
1864 vpmuludq $H1,$T2,$T4 # h1*r2
1865 vpmuludq $H0,$T2,$T2 # h0*r2
1866 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1867 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1869 vpmuludq $H1,$H2,$T4 # h1*r3
1870 vpmuludq $H0,$H2,$H2 # h0*r3
1871 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1872 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1873 vpmuludq $H3,$T3,$T4 # h3*s3
1874 vpmuludq $H4,$T3,$H2 # h4*s3
1875 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1876 vpaddq $H2,$D2,$D2 # d2 += h4*s3
1878 vpmuludq $H3,$S4,$H3 # h3*s4
1879 vpmuludq $H4,$S4,$H4 # h4*s4
1880 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1881 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1882 vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
1883 vpmuludq $H1,$S4,$H0 # h1*s4
1884 vmovdqa 64(%rcx),$MASK # .Lmask26
1885 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1886 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1888 ################################################################
1893 vpaddq $D3,$H4,$H4 # h3 -> h4
1897 vpaddq $D0,$D1,$H1 # h0 -> h1
1904 vpaddq $D1,$H2,$H2 # h1 -> h2
1908 vpaddq $D4,$H0,$H0 # h4 -> h0
1912 vpaddq $D2,$H3,$H3 # h2 -> h3
1916 vpaddq $D0,$H1,$H1 # h0 -> h1
1920 vpaddq $D3,$H4,$H4 # h3 -> h4
1922 ################################################################
1923 # horizontal addition
1936 vpermq \$0x2,$H2,$T2
1937 vpermq \$0x2,$H0,$T0
1938 vpermq \$0x2,$H1,$T1
1939 vpermq \$0x2,$H3,$T3
1940 vpermq \$0x2,$H4,$T4
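# vpermq \$0x2 brings the upper 128-bit half of each accumulator down,
# so the additions that follow fold all four lanes into the low one,
# which is all that gets stored below.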
1947 vmovd %x#$H0,`4*0-48-64`($ctx) # save partially reduced
1948 vmovd %x#$H1,`4*1-48-64`($ctx)
1949 vmovd %x#$H2,`4*2-48-64`($ctx)
1950 vmovd %x#$H3,`4*3-48-64`($ctx)
1951 vmovd %x#$H4,`4*4-48-64`($ctx)
1953 $code.=<<___ if ($win64);
1954 vmovdqa 0x50(%r11),%xmm6
1955 vmovdqa 0x60(%r11),%xmm7
1956 vmovdqa 0x70(%r11),%xmm8
1957 vmovdqa 0x80(%r11),%xmm9
1958 vmovdqa 0x90(%r11),%xmm10
1959 vmovdqa 0xa0(%r11),%xmm11
1960 vmovdqa 0xb0(%r11),%xmm12
1961 vmovdqa 0xc0(%r11),%xmm13
1962 vmovdqa 0xd0(%r11),%xmm14
1963 vmovdqa 0xe0(%r11),%xmm15
1967 $code.=<<___ if (!$win64);
1973 .size poly1305_blocks_avx2,.-poly1305_blocks_avx2
1980 .long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
1982 .long 1<<24,0,1<<24,0,1<<24,0,1<<24,0
1984 .long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
1986 .long 5,0,5,0,5,0,5,0
1991 .asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1995 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1996 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
2004 .extern __imp_RtlVirtualUnwind
2005 .type se_handler,\@abi-omnipotent
2019 mov 120($context),%rax # pull context->Rax
2020 mov 248($context),%rbx # pull context->Rip
2022 mov 8($disp),%rsi # disp->ImageBase
2023 mov 56($disp),%r11 # disp->HandlerData
2025 mov 0(%r11),%r10d # HandlerData[0]
2026 lea (%rsi,%r10),%r10 # prologue label
2027 cmp %r10,%rbx # context->Rip<.Lprologue
2028 jb .Lcommon_seh_tail
2030 mov 152($context),%rax # pull context->Rsp
2032 mov 4(%r11),%r10d # HandlerData[1]
2033 lea (%rsi,%r10),%r10 # epilogue label
2034 cmp %r10,%rbx # context->Rip>=.Lepilogue
2035 jae .Lcommon_seh_tail
2045 mov %rbx,144($context) # restore context->Rbx
2046 mov %rbp,160($context) # restore context->Rbp
2047 mov %r12,216($context) # restore context->R12
2048 mov %r13,224($context) # restore context->R13
2049 mov %r14,232($context) # restore context->R14
2050 mov %r15,240($context) # restore context->R15
2052 jmp .Lcommon_seh_tail
2053 .size se_handler,.-se_handler
2055 .type avx_handler,\@abi-omnipotent
2069 mov 120($context),%rax # pull context->Rax
2070 mov 248($context),%rbx # pull context->Rip
2072 mov 8($disp),%rsi # disp->ImageBase
2073 mov 56($disp),%r11 # disp->HandlerData
2075 mov 0(%r11),%r10d # HandlerData[0]
2076 lea (%rsi,%r10),%r10 # prologue label
2077 cmp %r10,%rbx # context->Rip<prologue label
2078 jb .Lcommon_seh_tail
2080 mov 152($context),%rax # pull context->Rsp
2082 mov 4(%r11),%r10d # HandlerData[1]
2083 lea (%rsi,%r10),%r10 # epilogue label
2084 cmp %r10,%rbx # context->Rip>=epilogue label
2085 jae .Lcommon_seh_tail
2087 mov 208($context),%rax # pull context->R11
2091 lea 512($context),%rdi # &context.Xmm6
2093 .long 0xa548f3fc # cld; rep movsq
2098 mov %rax,152($context) # restore context->Rsp
2099 mov %rsi,168($context) # restore context->Rsi
2100 mov %rdi,176($context) # restore context->Rdi
2102 mov 40($disp),%rdi # disp->ContextRecord
2103 mov $context,%rsi # context
2104 mov \$154,%ecx # sizeof(CONTEXT) in quadwords
2105 .long 0xa548f3fc # cld; rep movsq
2108 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
2109 mov 8(%rsi),%rdx # arg2, disp->ImageBase
2110 mov 0(%rsi),%r8 # arg3, disp->ControlPc
2111 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
2112 mov 40(%rsi),%r10 # disp->ContextRecord
2113 lea 56(%rsi),%r11 # &disp->HandlerData
2114 lea 24(%rsi),%r12 # &disp->EstablisherFrame
2115 mov %r10,32(%rsp) # arg5
2116 mov %r11,40(%rsp) # arg6
2117 mov %r12,48(%rsp) # arg7
2118 mov %rcx,56(%rsp) # arg8, (NULL)
2119 call *__imp_RtlVirtualUnwind(%rip)
2121 mov \$1,%eax # ExceptionContinueSearch
2133 .size avx_handler,.-avx_handler
2137 .rva .LSEH_begin_poly1305_init
2138 .rva .LSEH_end_poly1305_init
2139 .rva .LSEH_info_poly1305_init
2141 .rva .LSEH_begin_poly1305_blocks
2142 .rva .LSEH_end_poly1305_blocks
2143 .rva .LSEH_info_poly1305_blocks
2145 .rva .LSEH_begin_poly1305_emit
2146 .rva .LSEH_end_poly1305_emit
2147 .rva .LSEH_info_poly1305_emit
2149 $code.=<<___ if ($avx);
2150 .rva .LSEH_begin_poly1305_blocks_avx
2152 .rva .LSEH_info_poly1305_blocks_avx_1
2156 .rva .LSEH_info_poly1305_blocks_avx_2
2159 .rva .LSEH_end_poly1305_blocks_avx
2160 .rva .LSEH_info_poly1305_blocks_avx_3
2162 .rva .LSEH_begin_poly1305_emit_avx
2163 .rva .LSEH_end_poly1305_emit_avx
2164 .rva .LSEH_info_poly1305_emit_avx
2166 $code.=<<___ if ($avx>1);
2167 .rva .LSEH_begin_poly1305_blocks_avx2
2168 .rva .Lbase2_64_avx2
2169 .rva .LSEH_info_poly1305_blocks_avx2_1
2171 .rva .Lbase2_64_avx2
2173 .rva .LSEH_info_poly1305_blocks_avx2_2
2176 .rva .LSEH_end_poly1305_blocks_avx2
2177 .rva .LSEH_info_poly1305_blocks_avx2_3
2182 .LSEH_info_poly1305_init:
2185 .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
2187 .LSEH_info_poly1305_blocks:
2190 .rva .Lblocks_body,.Lblocks_epilogue
2192 .LSEH_info_poly1305_emit:
2195 .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
2197 $code.=<<___ if ($avx);
2198 .LSEH_info_poly1305_blocks_avx_1:
2201 .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]
2203 .LSEH_info_poly1305_blocks_avx_2:
2206 .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]
2208 .LSEH_info_poly1305_blocks_avx_3:
2211 .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]
2213 .LSEH_info_poly1305_emit_avx:
2216 .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
2218 $code.=<<___ if ($avx>1);
2219 .LSEH_info_poly1305_blocks_avx2_1:
2222 .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]
2224 .LSEH_info_poly1305_blocks_avx2_2:
2227 .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]
2229 .LSEH_info_poly1305_blocks_avx2_3:
2232 .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
2236 foreach (split('\n',$code)) {
2237 s/\`([^\`]*)\`/eval($1)/ge;
2238 s/%r([a-z]+)#d/%e$1/g;
2239 s/%r([0-9]+)#d/%r$1d/g;
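# e.g. "%rax#d" becomes "%eax" and "%r8#d" becomes "%r8d", resolving
# the 32-bit register shorthand used in the code above.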