# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for x86_64.
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#		IALU/gcc-4.8(*)	AVX(**)		AVX2
#
# Westmere	1.86/+120%	-
# Sandy Bridge	1.39/+140%	1.10
# Haswell	1.10/+175%	1.11		0.65
# Skylake	1.12/+120%	0.96		0.51
# Silvermont	2.83/+95%	-
# VIA Nano	1.82/+150%	-
# Sledgehammer	1.38/+160%	-
# Bulldozer	2.21/+130%	0.97
#
# (*)	improvement coefficients relative to clang are more modest and
#	are ~50% on most processors; in both cases we are comparing to
#	__int128 code;
#
# (**)	an SSE2 implementation was attempted, but among non-AVX
#	processors it was faster than the integer-only code only on
#	older Intel P4 and Core parts, by 30-50%, with the advantage
#	shrinking on newer processors and turning into a loss on
#	contemporary ones, for example almost 2x slower on Atom; as the
#	former are naturally disappearing, SSE2 is deemed unnecessary;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
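
# $avx encodes the selected SIMD level: 0 keeps the integer-only code,
# 1 enables the AVX path, 2 additionally enables AVX2. Each probe here
# maps the detected assembler/compiler version onto that scale; the
# version thresholds are assumed to be the first releases with usable
# AVX/AVX2 support.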
if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=12);

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);

open OUT,"| \"$^X\" $xlate $flavour $output";

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");

sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
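#
# In plain arithmetic, with p = 2^130-5, h = h0 + h1*2^64 + h2*2^128
# and r = r0 + r1*2^64, one iteration computes h = (h * r) mod p.
# A sketch of how the products are placed (a reader's aid, not
# authoritative documentation): h0*r1, h1*r0 and h2*r0 land at weights
# 2^64/2^128 directly; s1 = r1 + (r1>>2) = r1*5/4 stands in for r1 at
# weight 2^128, since 2^128*r1 = (5/4)*r1 (mod p) and clamping makes
# r1 divisible by 4; bits at and above 2^130 are folded back
# multiplied by 5 in the "last reduction step".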
	mov	%rax,$h0		# future $h0
	mov	$h2,$h1			# borrow $h1
	imulq	$s1,$h1			# h2*s1
	imulq	$r0,$h2			# h2*r0
	mov	\$-4,%rax		# mask value
	and	$d3,%rax		# last reduction step

########################################################################
# The layout of the opaque area is as follows:
#
#	unsigned __int64 h[3];		# current hash value base 2^64
#	unsigned __int64 r[2];		# key value base 2^64
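#
# For orientation only, an equivalent C declaration of this area
# (field names are illustrative, not a public structure):
#
#	struct poly1305_base2_64 {
#		uint64_t h[3];		/* 130-bit accumulator       */
#		uint64_t r[2];		/* clamped 128-bit key value */
#	};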
.extern	OPENSSL_ia32cap_P

.globl	poly1305_blocks

.type	poly1305_init,\@function,3
	mov	%rax,0($ctx)		# initialize hash value
	lea	poly1305_blocks(%rip),%r10
	lea	poly1305_emit(%rip),%r11
$code.=<<___	if ($avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%r9
	lea	poly1305_blocks_avx(%rip),%rax
	lea	poly1305_emit_avx(%rip),%rcx
	bt	\$`60-32`,%r9		# AVX?
$code.=<<___	if ($avx>1);
	lea	poly1305_blocks_avx2(%rip),%rax
	bt	\$`5+32`,%r9		# AVX2?
	mov	\$0x0ffffffc0fffffff,%rax	# clamp mask for r[0]
	mov	\$0x0ffffffc0ffffffc,%rcx	# clamp mask for r[1]
$code.=<<___	if ($flavour !~ /elf32/);
$code.=<<___	if ($flavour =~ /elf32/);
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,\@function,4
	sub	\$16,$len		# too short?
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
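	# An aside on the line above: clamping guarantees that r1 is
	# divisible by 4, so r1>>2 is exact and s1 = r1*5/4; because
	# 2^130 = 5 (mod 2^130-5), multiplying by s1 at weight 2^0 is
	# the same as multiplying by r1 at weight 2^128.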
	add	0($inp),$h0		# accumulate input
	&poly1305_iteration();
	sub	\$16,%r15		# len-=16
	mov	$h0,0($ctx)		# store hash value
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,\@function,3
	mov	0($ctx),%r8		# load hash value
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
.size	poly1305_emit,.-poly1305_emit
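
# What emit does, as a rough C sketch (illustrative only): the
# accumulator is only partially reduced, but it stays below 2*p, so a
# single conditional subtraction of p = 2^130-5 suffices; h + 5
# carries out of bit 130 exactly when h >= p, and the shift-by-2 of
# the top limb above extracts that carry as the selector.
#
#	t = h + 5;			/* == h - p + 2^130         */
#	h = (t >> 130) ? t : h;		/* conditional select (cmov) */
#	mac = (h + nonce) mod 2^128;	/* low 128 bits are the tag  */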

########################################################################
# The layout of the opaque area used by the AVX code is as follows:
#
#	unsigned __int32 h[5];		# current hash value base 2^26
#	unsigned __int32 is_base2_26;
#	unsigned __int64 r[2];		# key value base 2^64
#	unsigned __int64 pad;
#	struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are base 2^26 digits of degrees of multiplier key. There are
# 5 digits, but the last four are interleaved with multiples of 5, for a
# total of 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
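#
# As a concrete illustration, a 130-bit value x is held as five 26-bit
# digits x0..x4 with x = x0 + x1*2^26 + x2*2^52 + x3*2^78 + x4*2^104:
#
#	x0 =  x         & 0x3ffffff
#	x1 = (x >> 26)  & 0x3ffffff
#	x2 = (x >> 52)  & 0x3ffffff
#	x3 = (x >> 78)  & 0x3ffffff
#	x4 =  x >> 104			# may exceed 26 bits between
#					# reductions
#
# The 5*r digits are precomputed so that the wrap-around products in
# the vectorized multiply below cost a single vpmuludq each.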

my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

.type	__poly1305_block,\@abi-omnipotent
	&poly1305_iteration();
.size	__poly1305_block,.-__poly1305_block

.type	__poly1305_init_avx,\@abi-omnipotent
	lea	48+64($ctx),$ctx	# size optimization
	call	__poly1305_block	# r^2

	mov	\$0x3ffffff,%eax	# save interleaved r^2 and r base 2^26
	mov	%eax,`16*0+0-64`($ctx)
	mov	%edx,`16*0+4-64`($ctx)

	mov	%eax,`16*1+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*1+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*2+0-64`($ctx)
	mov	%edx,`16*2+4-64`($ctx)

	mov	%eax,`16*3+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*3+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*4+0-64`($ctx)
	mov	%edx,`16*4+4-64`($ctx)

	mov	%eax,`16*5+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*5+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*6+0-64`($ctx)
	mov	%edx,`16*6+4-64`($ctx)

	mov	$d1#d,`16*7+0-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d2#d,`16*7+4-64`($ctx)
	lea	($d2,$d2,4),$d2		# *5
	mov	$d1#d,`16*8+0-64`($ctx)
	mov	$d2#d,`16*8+4-64`($ctx)
	call	__poly1305_block	# r^3

	mov	\$0x3ffffff,%eax	# save r^3 base 2^26
	mov	%eax,`16*0+12-64`($ctx)

	mov	%edx,`16*1+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+12-64`($ctx)

	mov	%eax,`16*3+12-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+12-64`($ctx)

	mov	%edx,`16*5+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+12-64`($ctx)

	mov	$d1#d,`16*7+12-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+12-64`($ctx)

	call	__poly1305_block	# r^4

	mov	\$0x3ffffff,%eax	# save r^4 base 2^26
	mov	%eax,`16*0+8-64`($ctx)

	mov	%edx,`16*1+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+8-64`($ctx)

	mov	%eax,`16*3+8-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+8-64`($ctx)

	mov	%edx,`16*5+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+8-64`($ctx)

	mov	$d1#d,`16*7+8-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+8-64`($ctx)

	lea	-48-64($ctx),$ctx	# size [de-]optimization
.size	__poly1305_init_avx,.-__poly1305_init_avx

.type	poly1305_blocks_avx,\@function,4
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	mov	$d2,$r1			# borrow $r1
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx
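	# The conversion above inverts the one at function entry: the
	# 130-bit value is cut at bit offsets 26/52/78/104, each digit
	# is masked with 0x3ffffff, and h[4] keeps whatever carries
	# accumulated above bit 104.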
.Lstore_base2_64_avx:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
.Lstore_base2_26_avx:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
.Lblocks_avx_epilogue:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx
.Lbase2_64_avx_epilogue:
	vmovd	4*0($ctx),$H0		# load hash value
$code.=<<___	if (!$win64);
$code.=<<___	if ($win64);
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
	vmovdqu	`16*3`($ctx),$D4	# preload r0^2
	lea	`16*3+64`($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx
	################################################################
	vmovdqu		16*2($inp),$T0
	vmovdqu		16*3($inp),$T1
	vmovdqa		64(%rcx),$MASK		# .Lmask26

	vpsrldq		\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4		# 4
	vpunpcklqdq	$T1,$T0,$T0		# 0:1
	vpunpcklqdq	$T3,$T2,$T3		# 2:3

	vpsrlq		\$40,$T4,$T4		# 4
	vpand		$MASK,$T0,$T0		# 0
	vpand		$MASK,$T1,$T1		# 1
	vpand		$MASK,$T2,$T2		# 2
	vpand		$MASK,$T3,$T3		# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always
	# expand and copy pre-calculated table to stack
	vmovdqu		`16*1-64`($ctx),$D1
	vmovdqu		`16*2-64`($ctx),$D2
	vpshufd		\$0xEE,$D4,$D3		# 34xx -> 3434
	vpshufd		\$0x44,$D4,$D0		# xx12 -> 1212
	vmovdqa		$D3,-0x90(%r11)
	vmovdqa		$D0,0x00(%rsp)
	vpshufd		\$0xEE,$D1,$D4
	vmovdqu		`16*3-64`($ctx),$D0
	vpshufd		\$0x44,$D1,$D1
	vmovdqa		$D4,-0x80(%r11)
	vmovdqa		$D1,0x10(%rsp)
	vpshufd		\$0xEE,$D2,$D3
	vmovdqu		`16*4-64`($ctx),$D1
	vpshufd		\$0x44,$D2,$D2
	vmovdqa		$D3,-0x70(%r11)
	vmovdqa		$D2,0x20(%rsp)
	vpshufd		\$0xEE,$D0,$D4
	vmovdqu		`16*5-64`($ctx),$D2
	vpshufd		\$0x44,$D0,$D0
	vmovdqa		$D4,-0x60(%r11)
	vmovdqa		$D0,0x30(%rsp)
	vpshufd		\$0xEE,$D1,$D3
	vmovdqu		`16*6-64`($ctx),$D0
	vpshufd		\$0x44,$D1,$D1
	vmovdqa		$D3,-0x50(%r11)
	vmovdqa		$D1,0x40(%rsp)
	vpshufd		\$0xEE,$D2,$D4
	vmovdqu		`16*7-64`($ctx),$D1
	vpshufd		\$0x44,$D2,$D2
	vmovdqa		$D4,-0x40(%r11)
	vmovdqa		$D2,0x50(%rsp)
	vpshufd		\$0xEE,$D0,$D3
	vmovdqu		`16*8-64`($ctx),$D2
	vpshufd		\$0x44,$D0,$D0
	vmovdqa		$D3,-0x30(%r11)
	vmovdqa		$D0,0x60(%rsp)
	vpshufd		\$0xEE,$D1,$D4
	vpshufd		\$0x44,$D1,$D1
	vmovdqa		$D4,-0x20(%r11)
	vmovdqa		$D1,0x70(%rsp)
	vpshufd		\$0xEE,$D2,$D3
	vmovdqa		0x00(%rsp),$D4		# preload r0^2
	vpshufd		\$0x44,$D2,$D2
	vmovdqa		$D3,-0x10(%r11)
	vmovdqa		$D2,0x80(%rsp)
	################################################################
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	#   \___________________/
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	#   \___________________/ \____________________/
	#
	# Note that we start with inp[2:3]*r^2, because that pair does
	# not depend on the reduction from the previous iteration.
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
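	#
	# (The 5*rN terms are sound because digits sit at weights
	# 2^(26*k) and 2^130 = 5 mod p; e.g. h4*r4 carries weight
	# 2^208 = 2^78 * 2^130 = 2^78 * 5, so it lands in d3 as
	# h4*(5*r4). Precomputing 5*rN turns each such fold into a
	# single vpmuludq.)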
	#
	# though note that $Tx and $Hx are "reversed" in this section,
	# and $D4 is preloaded with r0^2...

	vpmuludq	$T0,$D4,$D0		# d0 = h0*r0
	vpmuludq	$T1,$D4,$D1		# d1 = h1*r0
	vmovdqa		$H2,0x20(%r11)		# offload hash
	vpmuludq	$T2,$D4,$D2		# d2 = h2*r0
	vmovdqa		0x10(%rsp),$H2		# r1^2
	vpmuludq	$T3,$D4,$D3		# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4		# d4 = h4*r0
	vmovdqa		$H0,0x00(%r11)		#
	vpmuludq	0x20(%rsp),$T4,$H0	# h4*s1
	vmovdqa		$H1,0x10(%r11)		#
	vpmuludq	$T3,$H2,$H1		# h3*r1
	vpaddq		$H0,$D0,$D0		# d0 += h4*s1
	vpaddq		$H1,$D4,$D4		# d4 += h3*r1
	vmovdqa		$H3,0x30(%r11)		#
	vpmuludq	$T2,$H2,$H0		# h2*r1
	vpmuludq	$T1,$H2,$H1		# h1*r1
	vpaddq		$H0,$D3,$D3		# d3 += h2*r1
	vmovdqa		0x30(%rsp),$H3		# r2^2
	vpaddq		$H1,$D2,$D2		# d2 += h1*r1
	vmovdqa		$H4,0x40(%r11)		#
	vpmuludq	$T0,$H2,$H2		# h0*r1
	vpmuludq	$T2,$H3,$H0		# h2*r2
	vpaddq		$H2,$D1,$D1		# d1 += h0*r1

	vmovdqa		0x40(%rsp),$H4		# s2^2
	vpaddq		$H0,$D4,$D4		# d4 += h2*r2
	vpmuludq	$T1,$H3,$H1		# h1*r2
	vpmuludq	$T0,$H3,$H3		# h0*r2
	vpaddq		$H1,$D3,$D3		# d3 += h1*r2
	vmovdqa		0x50(%rsp),$H2		# r3^2
	vpaddq		$H3,$D2,$D2		# d2 += h0*r2
	vpmuludq	$T4,$H4,$H0		# h4*s2
	vpmuludq	$T3,$H4,$H4		# h3*s2
	vpaddq		$H0,$D1,$D1		# d1 += h4*s2
	vmovdqa		0x60(%rsp),$H3		# s3^2
	vpaddq		$H4,$D0,$D0		# d0 += h3*s2

	vmovdqa		0x80(%rsp),$H4		# s4^2
	vpmuludq	$T1,$H2,$H1		# h1*r3
	vpmuludq	$T0,$H2,$H2		# h0*r3
	vpaddq		$H1,$D4,$D4		# d4 += h1*r3
	vpaddq		$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$T4,$H3,$H0		# h4*s3
	vpmuludq	$T3,$H3,$H1		# h3*s3
	vpaddq		$H0,$D2,$D2		# d2 += h4*s3
	vmovdqu		16*0($inp),$H0		# load input
	vpaddq		$H1,$D1,$D1		# d1 += h3*s3
	vpmuludq	$T2,$H3,$H3		# h2*s3
	vpmuludq	$T2,$H4,$T2		# h2*s4
	vpaddq		$H3,$D0,$D0		# d0 += h2*s3

	vmovdqu		16*1($inp),$H1		#
	vpaddq		$T2,$D1,$D1		# d1 += h2*s4
	vpmuludq	$T3,$H4,$T3		# h3*s4
	vpmuludq	$T4,$H4,$T4		# h4*s4
	vpsrldq		\$6,$H0,$H2		# splat input
	vpaddq		$T3,$D2,$D2		# d2 += h3*s4
	vpaddq		$T4,$D3,$D3		# d3 += h4*s4
	vpsrldq		\$6,$H1,$H3		#
	vpmuludq	0x70(%rsp),$T0,$T4	# h0*r4
	vpmuludq	$T1,$H4,$T0		# h1*s4
	vpunpckhqdq	$H1,$H0,$H4		# 4
	vpaddq		$T4,$D4,$D4		# d4 += h0*r4
	vmovdqa		-0x90(%r11),$T4		# r0^4
	vpaddq		$T0,$D0,$D0		# d0 += h1*s4

	vpunpcklqdq	$H1,$H0,$H0		# 0:1
	vpunpcklqdq	$H3,$H2,$H3		# 2:3

	#vpsrlq		\$40,$H4,$H4		# 4
	vpsrldq		\$`40/8`,$H4,$H4	# 4
	vpand		$MASK,$H0,$H0		# 0
	vpand		$MASK,$H1,$H1		# 1
	vpand		0(%rcx),$H4,$H4		# .Lmask24
	vpand		$MASK,$H2,$H2		# 2
	vpand		$MASK,$H3,$H3		# 3
	vpor		32(%rcx),$H4,$H4	# padbit, yes, always

	vpaddq		0x00(%r11),$H0,$H0	# add hash value
	vpaddq		0x10(%r11),$H1,$H1
	vpaddq		0x20(%r11),$H2,$H2
	vpaddq		0x30(%r11),$H3,$H3
	vpaddq		0x40(%r11),$H4,$H4
	################################################################
	# Now we accumulate (inp[0:1]+hash)*r^4
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$H0,$T4,$T0		# h0*r0
	vpmuludq	$H1,$T4,$T1		# h1*r0
	vmovdqa		-0x80(%r11),$T2		# r1^4
	vpmuludq	$H2,$T4,$T0		# h2*r0
	vpmuludq	$H3,$T4,$T1		# h3*r0
	vpmuludq	$H4,$T4,$T4		# h4*r0
	vpmuludq	-0x70(%r11),$H4,$T0	# h4*s1
	vpaddq		$T0,$D0,$D0		# d0 += h4*s1
	vpmuludq	$H2,$T2,$T1		# h2*r1
	vpmuludq	$H3,$T2,$T0		# h3*r1
	vpaddq		$T1,$D3,$D3		# d3 += h2*r1
	vmovdqa		-0x60(%r11),$T3		# r2^4
	vpaddq		$T0,$D4,$D4		# d4 += h3*r1
	vpmuludq	$H1,$T2,$T1		# h1*r1
	vpmuludq	$H0,$T2,$T2		# h0*r1
	vpaddq		$T1,$D2,$D2		# d2 += h1*r1
	vpaddq		$T2,$D1,$D1		# d1 += h0*r1

	vmovdqa		-0x50(%r11),$T4		# s2^4
	vpmuludq	$H2,$T3,$T0		# h2*r2
	vpmuludq	$H1,$T3,$T1		# h1*r2
	vpaddq		$T0,$D4,$D4		# d4 += h2*r2
	vpaddq		$T1,$D3,$D3		# d3 += h1*r2
	vmovdqa		-0x40(%r11),$T2		# r3^4
	vpmuludq	$H0,$T3,$T3		# h0*r2
	vpmuludq	$H4,$T4,$T0		# h4*s2
	vpaddq		$T3,$D2,$D2		# d2 += h0*r2
	vpaddq		$T0,$D1,$D1		# d1 += h4*s2
	vmovdqa		-0x30(%r11),$T3		# s3^4
	vpmuludq	$H3,$T4,$T4		# h3*s2
	vpmuludq	$H1,$T2,$T1		# h1*r3
	vpaddq		$T4,$D0,$D0		# d0 += h3*s2

	vmovdqa		-0x10(%r11),$T4		# s4^4
	vpaddq		$T1,$D4,$D4		# d4 += h1*r3
	vpmuludq	$H0,$T2,$T2		# h0*r3
	vpmuludq	$H4,$T3,$T0		# h4*s3
	vpaddq		$T2,$D3,$D3		# d3 += h0*r3
	vpaddq		$T0,$D2,$D2		# d2 += h4*s3
	vmovdqu		16*2($inp),$T0		# load input
	vpmuludq	$H3,$T3,$T2		# h3*s3
	vpmuludq	$H2,$T3,$T3		# h2*s3
	vpaddq		$T2,$D1,$D1		# d1 += h3*s3
	vmovdqu		16*3($inp),$T1		#
	vpaddq		$T3,$D0,$D0		# d0 += h2*s3

	vpmuludq	$H2,$T4,$H2		# h2*s4
	vpmuludq	$H3,$T4,$H3		# h3*s4
	vpsrldq		\$6,$T0,$T2		# splat input
	vpaddq		$H2,$D1,$D1		# d1 += h2*s4
	vpmuludq	$H4,$T4,$H4		# h4*s4
	vpsrldq		\$6,$T1,$T3		#
	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
	vpmuludq	-0x20(%r11),$H0,$H4	# h0*r4

	vpunpckhqdq	$T1,$T0,$T4		# 4
	vpaddq		$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0		# h0 = d0 + h1*s4

	vpunpcklqdq	$T1,$T0,$T0		# 0:1
	vpunpcklqdq	$T3,$T2,$T3		# 2:3

	#vpsrlq		\$40,$T4,$T4		# 4
	vpsrldq		\$`40/8`,$T4,$T4	# 4
	vmovdqa		0x00(%rsp),$D4		# preload r0^2
	vpand		$MASK,$T0,$T0		# 0
	vpand		$MASK,$T1,$T1		# 1
	vpand		0(%rcx),$T4,$T4		# .Lmask24
	vpand		$MASK,$T2,$T2		# 2
	vpand		$MASK,$T3,$T3		# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always
	################################################################
	# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein

	vpaddq		$D3,$H4,$H4		# h3 -> h4
	vpaddq		$D0,$D1,$H1		# h0 -> h1
	vpaddq		$D1,$H2,$H2		# h1 -> h2
	vpaddq		$D0,$H0,$H0		# h4 -> h0
	vpaddq		$D2,$H3,$H3		# h2 -> h3
	vpaddq		$D0,$H1,$H1		# h0 -> h1
	vpaddq		$D3,$H4,$H4		# h3 -> h4
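	#
	# In scalar terms each step above is: carry = dN >> 26;
	# d(N+1) += carry; dN &= 0x3ffffff, with the carry out of the
	# top digit re-entering the bottom one multiplied by 5
	# (2^130 = 5 mod p). The order h3->h4, h0->h1, h1->h2, h4->h0,
	# h2->h3, h0->h1, h3->h4 leaves every digit only slightly above
	# 26 bits, which is all the next multiplication needs.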
	################################################################
	# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	vpshufd		\$0x10,$D4,$D4		# r0^n, xx12 -> x1x2
	vmovdqa		$H2,0x20(%r11)
	vmovdqa		$H0,0x00(%r11)
	vmovdqa		$H1,0x10(%r11)
	vmovdqa		$H3,0x30(%r11)
	vmovdqa		$H4,0x40(%r11)
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$T2,$D4,$D2		# d2 = h2*r0
	vpmuludq	$T0,$D4,$D0		# d0 = h0*r0
	vpshufd		\$0x10,`16*1-64`($ctx),$H2	# r1^n
	vpmuludq	$T1,$D4,$D1		# d1 = h1*r0
	vpmuludq	$T3,$D4,$D3		# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4		# d4 = h4*r0

	vpmuludq	$T3,$H2,$H0		# h3*r1
	vpaddq		$H0,$D4,$D4		# d4 += h3*r1
	vpshufd		\$0x10,`16*2-64`($ctx),$H3	# s1^n
	vpmuludq	$T2,$H2,$H1		# h2*r1
	vpaddq		$H1,$D3,$D3		# d3 += h2*r1
	vpshufd		\$0x10,`16*3-64`($ctx),$H4	# r2^n
	vpmuludq	$T1,$H2,$H0		# h1*r1
	vpaddq		$H0,$D2,$D2		# d2 += h1*r1
	vpmuludq	$T0,$H2,$H2		# h0*r1
	vpaddq		$H2,$D1,$D1		# d1 += h0*r1
	vpmuludq	$T4,$H3,$H3		# h4*s1
	vpaddq		$H3,$D0,$D0		# d0 += h4*s1

	vpshufd		\$0x10,`16*4-64`($ctx),$H2	# s2^n
	vpmuludq	$T2,$H4,$H1		# h2*r2
	vpaddq		$H1,$D4,$D4		# d4 += h2*r2
	vpmuludq	$T1,$H4,$H0		# h1*r2
	vpaddq		$H0,$D3,$D3		# d3 += h1*r2
	vpshufd		\$0x10,`16*5-64`($ctx),$H3	# r3^n
	vpmuludq	$T0,$H4,$H4		# h0*r2
	vpaddq		$H4,$D2,$D2		# d2 += h0*r2
	vpmuludq	$T4,$H2,$H1		# h4*s2
	vpaddq		$H1,$D1,$D1		# d1 += h4*s2
	vpshufd		\$0x10,`16*6-64`($ctx),$H4	# s3^n
	vpmuludq	$T3,$H2,$H2		# h3*s2
	vpaddq		$H2,$D0,$D0		# d0 += h3*s2

	vpmuludq	$T1,$H3,$H0		# h1*r3
	vpaddq		$H0,$D4,$D4		# d4 += h1*r3
	vpmuludq	$T0,$H3,$H3		# h0*r3
	vpaddq		$H3,$D3,$D3		# d3 += h0*r3
	vpshufd		\$0x10,`16*7-64`($ctx),$H2	# r4^n
	vpmuludq	$T4,$H4,$H1		# h4*s3
	vpaddq		$H1,$D2,$D2		# d2 += h4*s3
	vpshufd		\$0x10,`16*8-64`($ctx),$H3	# s4^n
	vpmuludq	$T3,$H4,$H0		# h3*s3
	vpaddq		$H0,$D1,$D1		# d1 += h3*s3
	vpmuludq	$T2,$H4,$H4		# h2*s3
	vpaddq		$H4,$D0,$D0		# d0 += h2*s3

	vpmuludq	$T0,$H2,$H2		# h0*r4
	vpaddq		$H2,$D4,$D4		# h4 = d4 + h0*r4
	vpmuludq	$T4,$H3,$H1		# h4*s4
	vpaddq		$H1,$D3,$D3		# h3 = d3 + h4*s4
	vpmuludq	$T3,$H3,$H0		# h3*s4
	vpaddq		$H0,$D2,$D2		# h2 = d2 + h3*s4
	vpmuludq	$T2,$H3,$H1		# h2*s4
	vpaddq		$H1,$D1,$D1		# h1 = d1 + h2*s4
	vpmuludq	$T1,$H3,$H3		# h1*s4
	vpaddq		$H3,$D0,$D0		# h0 = d0 + h1*s4
	vmovdqu		16*0($inp),$H0		# load input
	vmovdqu		16*1($inp),$H1

	vpsrldq		\$6,$H0,$H2		# splat input
	vpunpckhqdq	$H1,$H0,$H4		# 4
	vpunpcklqdq	$H1,$H0,$H0		# 0:1
	vpunpcklqdq	$H3,$H2,$H3		# 2:3

	vpsrlq		\$40,$H4,$H4		# 4
	vpand		$MASK,$H0,$H0		# 0
	vpand		$MASK,$H1,$H1		# 1
	vpand		$MASK,$H2,$H2		# 2
	vpand		$MASK,$H3,$H3		# 3
	vpor		32(%rcx),$H4,$H4	# padbit, yes, always

	vpshufd		\$0x32,`16*0-64`($ctx),$T4	# r0^n, 34xx -> x3x4
	vpaddq		0x00(%r11),$H0,$H0
	vpaddq		0x10(%r11),$H1,$H1
	vpaddq		0x20(%r11),$H2,$H2
	vpaddq		0x30(%r11),$H3,$H3
	vpaddq		0x40(%r11),$H4,$H4
	################################################################
	# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

	vpmuludq	$H0,$T4,$T0		# h0*r0
	vpaddq		$T0,$D0,$D0		# d0 += h0*r0
	vpmuludq	$H1,$T4,$T1		# h1*r0
	vpaddq		$T1,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H2,$T4,$T0		# h2*r0
	vpaddq		$T0,$D2,$D2		# d2 += h2*r0
	vpshufd		\$0x32,`16*1-64`($ctx),$T2	# r1^n
	vpmuludq	$H3,$T4,$T1		# h3*r0
	vpaddq		$T1,$D3,$D3		# d3 += h3*r0
	vpmuludq	$H4,$T4,$T4		# h4*r0
	vpaddq		$T4,$D4,$D4		# d4 += h4*r0

	vpmuludq	$H3,$T2,$T0		# h3*r1
	vpaddq		$T0,$D4,$D4		# d4 += h3*r1
	vpshufd		\$0x32,`16*2-64`($ctx),$T3	# s1
	vpmuludq	$H2,$T2,$T1		# h2*r1
	vpaddq		$T1,$D3,$D3		# d3 += h2*r1
	vpshufd		\$0x32,`16*3-64`($ctx),$T4	# r2
	vpmuludq	$H1,$T2,$T0		# h1*r1
	vpaddq		$T0,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H0,$T2,$T2		# h0*r1
	vpaddq		$T2,$D1,$D1		# d1 += h0*r1
	vpmuludq	$H4,$T3,$T3		# h4*s1
	vpaddq		$T3,$D0,$D0		# d0 += h4*s1

	vpshufd		\$0x32,`16*4-64`($ctx),$T2	# s2
	vpmuludq	$H2,$T4,$T1		# h2*r2
	vpaddq		$T1,$D4,$D4		# d4 += h2*r2
	vpmuludq	$H1,$T4,$T0		# h1*r2
	vpaddq		$T0,$D3,$D3		# d3 += h1*r2
	vpshufd		\$0x32,`16*5-64`($ctx),$T3	# r3
	vpmuludq	$H0,$T4,$T4		# h0*r2
	vpaddq		$T4,$D2,$D2		# d2 += h0*r2
	vpmuludq	$H4,$T2,$T1		# h4*s2
	vpaddq		$T1,$D1,$D1		# d1 += h4*s2
	vpshufd		\$0x32,`16*6-64`($ctx),$T4	# s3
	vpmuludq	$H3,$T2,$T2		# h3*s2
	vpaddq		$T2,$D0,$D0		# d0 += h3*s2

	vpmuludq	$H1,$T3,$T0		# h1*r3
	vpaddq		$T0,$D4,$D4		# d4 += h1*r3
	vpmuludq	$H0,$T3,$T3		# h0*r3
	vpaddq		$T3,$D3,$D3		# d3 += h0*r3
	vpshufd		\$0x32,`16*7-64`($ctx),$T2	# r4
	vpmuludq	$H4,$T4,$T1		# h4*s3
	vpaddq		$T1,$D2,$D2		# d2 += h4*s3
	vpshufd		\$0x32,`16*8-64`($ctx),$T3	# s4
	vpmuludq	$H3,$T4,$T0		# h3*s3
	vpaddq		$T0,$D1,$D1		# d1 += h3*s3
	vpmuludq	$H2,$T4,$T4		# h2*s3
	vpaddq		$T4,$D0,$D0		# d0 += h2*s3

	vpmuludq	$H0,$T2,$T2		# h0*r4
	vpaddq		$T2,$D4,$D4		# d4 += h0*r4
	vpmuludq	$H4,$T3,$T1		# h4*s4
	vpaddq		$T1,$D3,$D3		# d3 += h4*s4
	vpmuludq	$H3,$T3,$T0		# h3*s4
	vpaddq		$T0,$D2,$D2		# d2 += h3*s4
	vpmuludq	$H2,$T3,$T1		# h2*s4
	vpaddq		$T1,$D1,$D1		# d1 += h2*s4
	vpmuludq	$H1,$T3,$T3		# h1*s4
	vpaddq		$T3,$D0,$D0		# d0 += h1*s4
	################################################################
	# horizontal addition

	################################################################
	vpaddq		$H3,$D4,$D4		# h3 -> h4
	vpaddq		$H0,$D1,$D1		# h0 -> h1
	vpaddq		$H1,$D2,$D2		# h1 -> h2
	vpaddq		$H4,$D0,$D0		# h4 -> h0
	vpaddq		$H2,$D3,$D3		# h2 -> h3
	vpaddq		$H0,$D1,$D1		# h0 -> h1
	vpaddq		$H3,$D4,$D4		# h3 -> h4

	vmovd		$D0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd		$D1,`4*1-48-64`($ctx)
	vmovd		$D2,`4*2-48-64`($ctx)
	vmovd		$D3,`4*3-48-64`($ctx)
	vmovd		$D4,`4*4-48-64`($ctx)
$code.=<<___	if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
$code.=<<___	if (!$win64);
.size	poly1305_blocks_avx,.-poly1305_blocks_avx

.type	poly1305_emit_avx,\@function,3
	cmpl	\$0,20($ctx)		# is_base2_26?
	mov	0($ctx),%eax		# load hash value base 2^26
	shl	\$26,%rcx		# base 2^26 -> base 2^64
	mov	%r10,%rax		# could be partially reduced, so reduce
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
.size	poly1305_emit_avx,.-poly1305_emit_avx

my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));
my $S4=$MASK;		# assumed alias: $MASK is reloaded from
			# .Lmask26 after each use of $S4 below

.type	poly1305_blocks_avx2,\@function,4
poly1305_blocks_avx2:
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	mov	$d2,$r1			# borrow $r1
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_26_pre_avx2

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx2	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx2
.Lstore_base2_64_avx2:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
.Lstore_base2_26_avx2:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
.Lblocks_avx2_epilogue:
.Lbase2_64_avx2_body:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_64_pre_avx2:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_64_pre_avx2

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx
.Lbase2_64_avx2_epilogue:
	vmovd	4*0($ctx),%x#$H0	# load hash value base 2^26
	vmovd	4*1($ctx),%x#$H1
	vmovd	4*2($ctx),%x#$H2
	vmovd	4*3($ctx),%x#$H3
	vmovd	4*4($ctx),%x#$H4
$code.=<<___	if (!$win64);
$code.=<<___	if ($win64);
	lea	-0xf8(%rsp),%r11
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
	lea	48+64($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	# expand and copy pre-calculated table to stack
	vmovdqu		`16*0-64`($ctx),%x#$T2
	vmovdqu		`16*1-64`($ctx),%x#$T3
	vmovdqu		`16*2-64`($ctx),%x#$T4
	vmovdqu		`16*3-64`($ctx),%x#$D0
	vmovdqu		`16*4-64`($ctx),%x#$D1
	vmovdqu		`16*5-64`($ctx),%x#$D2
	vmovdqu		`16*6-64`($ctx),%x#$D3
	vpermq		\$0x15,$T2,$T2		# 00003412 -> 12343434
	vmovdqu		`16*7-64`($ctx),%x#$D4
	vpermq		\$0x15,$T3,$T3
	vpshufd		\$0xc8,$T2,$T2		# 12343434 -> 14243444
	vmovdqu		`16*8-64`($ctx),%x#$MASK
	vpermq		\$0x15,$T4,$T4
	vpshufd		\$0xc8,$T3,$T3
	vmovdqa		$T2,0x00(%rsp)
	vpermq		\$0x15,$D0,$D0
	vpshufd		\$0xc8,$T4,$T4
	vmovdqa		$T3,0x20(%rsp)
	vpermq		\$0x15,$D1,$D1
	vpshufd		\$0xc8,$D0,$D0
	vmovdqa		$T4,0x40(%rsp)
	vpermq		\$0x15,$D2,$D2
	vpshufd		\$0xc8,$D1,$D1
	vmovdqa		$D0,0x60(%rsp)
	vpermq		\$0x15,$D3,$D3
	vpshufd		\$0xc8,$D2,$D2
	vmovdqa		$D1,0x80(%rsp)
	vpermq		\$0x15,$D4,$D4
	vpshufd		\$0xc8,$D3,$D3
	vmovdqa		$D2,0xa0(%rsp)
	vpermq		\$0x15,$MASK,$MASK
	vpshufd		\$0xc8,$D4,$D4
	vmovdqa		$D3,0xc0(%rsp)
	vpshufd		\$0xc8,$MASK,$MASK
	vmovdqa		$D4,0xe0(%rsp)
	vmovdqa		$MASK,0x100(%rsp)
	vmovdqa		64(%rcx),$MASK		# .Lmask26
	################################################################
	vmovdqu		16*0($inp),%x#$T0
	vmovdqu		16*1($inp),%x#$T1
	vinserti128	\$1,16*2($inp),$T0,$T0
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpsrldq		\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4		# 4
	vpunpcklqdq	$T3,$T2,$T2		# 2:3
	vpunpcklqdq	$T1,$T0,$T0		# 0:1

	vpsrlq		\$40,$T4,$T4		# 4
	vpand		$MASK,$T2,$T2		# 2
	vpand		$MASK,$T0,$T0		# 0
	vpand		$MASK,$T1,$T1		# 1
	vpand		$MASK,$T3,$T3		# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always

	lea		0x90(%rsp),%rax		# size optimization
	vpaddq		$H2,$T2,$H2		# accumulate input
	################################################################
	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
	#   \________/\________/
	################################################################
	#vpaddq		$H2,$T2,$H2		# accumulate input
	vmovdqa		`32*0`(%rsp),$T0	# r0^4
	vmovdqa		`32*1`(%rsp),$T1	# r1^4
	vmovdqa		`32*3`(%rsp),$T2	# r2^4
	vmovdqa		`32*6-0x90`(%rax),$T3	# s3^4
	vmovdqa		`32*8-0x90`(%rax),$S4	# s4^4

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, since h2 is "chronologically" the first digit
	# available, the corresponding operations are pulled up, so
	# in effect:
	#
	# d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
	# d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
	# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
	vpmuludq	$H2,$T0,$D2		# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3		# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4		# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0		# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1		# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4		# h0*r1
	vpmuludq	$H1,$T1,$H2		# h1*r1, borrow $H2 as temp
	vpaddq		$T4,$D1,$D1		# d1 += h0*r1
	vpaddq		$H2,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4		# h3*r1
	vpmuludq	`32*2`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4		# d4 += h3*r1
	vpaddq		$H2,$D0,$D0		# d0 += h4*s1
	vmovdqa		`32*4-0x90`(%rax),$T1	# s2

	vpmuludq	$H0,$T0,$T4		# h0*r0
	vpmuludq	$H1,$T0,$H2		# h1*r0
	vpaddq		$T4,$D0,$D0		# d0 += h0*r0
	vpaddq		$H2,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4		# h3*r0
	vpmuludq	$H4,$T0,$H2		# h4*r0
	vmovdqu		16*0($inp),%x#$T0	# load input
	vpaddq		$T4,$D3,$D3		# d3 += h3*r0
	vpaddq		$H2,$D4,$D4		# d4 += h4*r0
	vinserti128	\$1,16*2($inp),$T0,$T0

	vpmuludq	$H3,$T1,$T4		# h3*s2
	vpmuludq	$H4,$T1,$H2		# h4*s2
	vmovdqu		16*1($inp),%x#$T1
	vpaddq		$T4,$D0,$D0		# d0 += h3*s2
	vpaddq		$H2,$D1,$D1		# d1 += h4*s2
	vmovdqa		`32*5-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4		# h1*r2
	vpmuludq	$H0,$T2,$T2		# h0*r2
	vpaddq		$T4,$D3,$D3		# d3 += h1*r2
	vpaddq		$T2,$D2,$D2		# d2 += h0*r2
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpmuludq	$H1,$H2,$T4		# h1*r3
	vpmuludq	$H0,$H2,$H2		# h0*r3
	vpsrldq		\$6,$T0,$T2		# splat input
	vpaddq		$T4,$D4,$D4		# d4 += h1*r3
	vpaddq		$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4		# h3*s3
	vpmuludq	$H4,$T3,$H2		# h4*s3
	vpaddq		$T4,$D1,$D1		# d1 += h3*s3
	vpaddq		$H2,$D2,$D2		# d2 += h4*s3
	vpunpckhqdq	$T1,$T0,$T4		# 4

	vpmuludq	$H3,$S4,$H3		# h3*s4
	vpmuludq	$H4,$S4,$H4		# h4*s4
	vpunpcklqdq	$T1,$T0,$T0		# 0:1
	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
	vpunpcklqdq	$T3,$T2,$T3		# 2:3
	vpmuludq	`32*7-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0		# h1*s4
	vmovdqa		64(%rcx),$MASK		# .Lmask26
	vpaddq		$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0		# h0 = d0 + h1*s4
	################################################################
	# lazy reduction (interleaved with tail of input splat)

	vpaddq		$D3,$H4,$H4		# h3 -> h4
	vpaddq		$D0,$D1,$H1		# h0 -> h1
	vpaddq		$D1,$H2,$H2		# h1 -> h2
	vpaddq		$D4,$H0,$H0		# h4 -> h0
	vpand		$MASK,$T2,$T2		# 2
	vpaddq		$D2,$H3,$H3		# h2 -> h3
	vpaddq		$T2,$H2,$H2		# modulo-scheduled
	vpaddq		$D0,$H1,$H1		# h0 -> h1
	vpsrlq		\$40,$T4,$T4		# 4
	vpaddq		$D3,$H4,$H4		# h3 -> h4
	vpand		$MASK,$T0,$T0		# 0
	vpand		$MASK,$T1,$T1		# 1
	vpand		$MASK,$T3,$T3		# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always
	################################################################
	# While the multiplications above were by r^4 in all lanes, in
	# the last iteration the least significant lane is multiplied
	# by r^4 and the most significant one by r, so this is a copy
	# of the above with references into the precomputed table
	# displaced by 4...

	#vpaddq		$H2,$T2,$H2		# accumulate input
	vmovdqu		`32*0+4`(%rsp),$T0	# r0^4
	vmovdqu		`32*1+4`(%rsp),$T1	# r1^4
	vmovdqu		`32*3+4`(%rsp),$T2	# r2^4
	vmovdqu		`32*6+4-0x90`(%rax),$T3	# s3^4
	vmovdqu		`32*8+4-0x90`(%rax),$S4	# s4^4
	vpmuludq	$H2,$T0,$D2		# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3		# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4		# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0		# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1		# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4		# h0*r1
	vpmuludq	$H1,$T1,$H2		# h1*r1
	vpaddq		$T4,$D1,$D1		# d1 += h0*r1
	vpaddq		$H2,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4		# h3*r1
	vpmuludq	`32*2+4`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4		# d4 += h3*r1
	vpaddq		$H2,$D0,$D0		# d0 += h4*s1

	vpmuludq	$H0,$T0,$T4		# h0*r0
	vpmuludq	$H1,$T0,$H2		# h1*r0
	vpaddq		$T4,$D0,$D0		# d0 += h0*r0
	vmovdqu		`32*4+4-0x90`(%rax),$T1	# s2
	vpaddq		$H2,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4		# h3*r0
	vpmuludq	$H4,$T0,$H2		# h4*r0
	vpaddq		$T4,$D3,$D3		# d3 += h3*r0
	vpaddq		$H2,$D4,$D4		# d4 += h4*r0

	vpmuludq	$H3,$T1,$T4		# h3*s2
	vpmuludq	$H4,$T1,$H2		# h4*s2
	vpaddq		$T4,$D0,$D0		# d0 += h3*s2
	vpaddq		$H2,$D1,$D1		# d1 += h4*s2
	vmovdqu		`32*5+4-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4		# h1*r2
	vpmuludq	$H0,$T2,$T2		# h0*r2
	vpaddq		$T4,$D3,$D3		# d3 += h1*r2
	vpaddq		$T2,$D2,$D2		# d2 += h0*r2

	vpmuludq	$H1,$H2,$T4		# h1*r3
	vpmuludq	$H0,$H2,$H2		# h0*r3
	vpaddq		$T4,$D4,$D4		# d4 += h1*r3
	vpaddq		$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4		# h3*s3
	vpmuludq	$H4,$T3,$H2		# h4*s3
	vpaddq		$T4,$D1,$D1		# d1 += h3*s3
	vpaddq		$H2,$D2,$D2		# d2 += h4*s3

	vpmuludq	$H3,$S4,$H3		# h3*s4
	vpmuludq	$H4,$S4,$H4		# h4*s4
	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
	vpmuludq	`32*7+4-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0		# h1*s4
	vmovdqa		64(%rcx),$MASK		# .Lmask26
	vpaddq		$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0		# h0 = d0 + h1*s4
	################################################################
	# horizontal addition

	vpermq		\$0x2,$H3,$T3
	vpermq		\$0x2,$H4,$T4
	vpermq		\$0x2,$H0,$T0
	vpermq		\$0x2,$D1,$T1
	vpermq		\$0x2,$H2,$T2
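	#
	# vpermq with immediate 0x2 routes the upper 128-bit half of
	# each register into the lower half; together with the (elided)
	# vpsrldq/vpaddq steps this folds the four 64-bit lanes of each
	# digit into lane 0, i.e. it sums the four parallel block
	# streams before the final reduction.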
	################################################################
	vpaddq		$D3,$H4,$H4		# h3 -> h4
	vpaddq		$D0,$D1,$H1		# h0 -> h1
	vpaddq		$D1,$H2,$H2		# h1 -> h2
	vpaddq		$D4,$H0,$H0		# h4 -> h0
	vpaddq		$D2,$H3,$H3		# h2 -> h3
	vpaddq		$D0,$H1,$H1		# h0 -> h1
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	vmovd		%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd		%x#$H1,`4*1-48-64`($ctx)
	vmovd		%x#$H2,`4*2-48-64`($ctx)
	vmovd		%x#$H3,`4*3-48-64`($ctx)
	vmovd		%x#$H4,`4*4-48-64`($ctx)
$code.=<<___	if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
$code.=<<___	if (!$win64);
.size	poly1305_blocks_avx2,.-poly1305_blocks_avx2

.long	0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0	# .Lmask24, 24-bit mask
.long	1<<24,0,1<<24,0,1<<24,0,1<<24,0				# padbit, 2^24
.long	0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0		# .Lmask26, 26-bit mask
.long	5,0,5,0,5,0,5,0						# 5, for reduction folds
.asciz	"Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)

.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail
	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler

.type	avx_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11

	lea	512($context),%rdi	# &context.Xmm6
	.long	0xa548f3fc		# cld; rep movsq

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)/8, qword count for movsq
	.long	0xa548f3fc		# cld; rep movsq
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	avx_handler,.-avx_handler
	.rva	.LSEH_begin_poly1305_init
	.rva	.LSEH_end_poly1305_init
	.rva	.LSEH_info_poly1305_init

	.rva	.LSEH_begin_poly1305_blocks
	.rva	.LSEH_end_poly1305_blocks
	.rva	.LSEH_info_poly1305_blocks

	.rva	.LSEH_begin_poly1305_emit
	.rva	.LSEH_end_poly1305_emit
	.rva	.LSEH_info_poly1305_emit
$code.=<<___	if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1
	.rva	.LSEH_info_poly1305_blocks_avx_2
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
$code.=<<___	if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
.LSEH_info_poly1305_init:
	.rva	.LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
	.rva	.LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
$code.=<<___	if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
$code.=<<___	if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]

foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
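
	# The two substitutions above implement this module's reg#d
	# shorthand for the 32-bit form of a 64-bit register, e.g.
	# "%rax#d" becomes "%eax" and "%r8#d" becomes "%r8d".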