poly1305/asm/poly1305-x86_64.pl: switch to vpermdd in table expansion.
1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # This module implements Poly1305 hash for x86_64.
18 #
19 # March 2015
20 #
21 # Initial release.
22 #
23 # December 2016
24 #
25 # Add AVX512F+VL+BW code path.
26 #
27 # Numbers are cycles per processed byte with poly1305_blocks alone,
28 # measured with rdtsc at fixed clock frequency.
29 #
30 #               IALU/gcc-4.8(*) AVX(**)         AVX2
31 # P4            4.46/+120%      -
32 # Core 2        2.41/+90%       -
33 # Westmere      1.88/+120%      -
34 # Sandy Bridge  1.39/+140%      1.10
35 # Haswell       1.14/+175%      1.11            0.65
36 # Skylake       1.13/+120%      0.96            0.51
37 # Silvermont    2.83/+95%       -
38 # Goldmont      1.70/+180%      -
39 # VIA Nano      1.82/+150%      -
40 # Sledgehammer  1.38/+160%      -
41 # Bulldozer     2.30/+130%      0.97
42 #
43 # (*)   improvement coefficients relative to clang are more modest and
44 #       are ~50% on most processors; in both cases we are comparing to
45 #       __int128 code;
46 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
47 #       it was faster than the integer-only code only on older Intel P4 and
48 #       Core processors, by 30-50%, less so the newer the processor, while
49 #       being slower on contemporary ones, e.g. almost 2x slower on Atom;
50 #       as the former are naturally disappearing, SSE2 is deemed unnecessary;
51
52 $flavour = shift;
53 $output  = shift;
54 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
55
56 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
57
58 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
59 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
60 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
61 die "can't locate x86_64-xlate.pl";
62
63 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
64                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
65         $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
66 }
67
68 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
69            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
70         $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
71         $avx += 1 if ($1==2.11 && $2>=8);
72 }
73
74 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
75            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
76         $avx = ($1>=10) + ($1>=12);
77 }
78
79 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
80         $avx = ($2>=3.0) + ($2>3.0);
81 }
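# Rough guide to the probes above: $avx encodes what the assembler in use can
# encode, 1 enabling the AVX code path, 2 additionally AVX2, 3 additionally
# the AVX512F+VL+BW path mentioned in the header; it stays 0 when the
# assembler is too old, in which case only the integer code is emitted.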
82
83 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
84 *STDOUT=*OUT;
85
86 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
87 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
88 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
89 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
90
91 sub poly1305_iteration {
92 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
93 # output:       $h0-$h2 *= $r0-$r1
94 $code.=<<___;
95         mulq    $h0                     # h0*r1
96         mov     %rax,$d2
97          mov    $r0,%rax
98         mov     %rdx,$d3
99
100         mulq    $h0                     # h0*r0
101         mov     %rax,$h0                # future $h0
102          mov    $r0,%rax
103         mov     %rdx,$d1
104
105         mulq    $h1                     # h1*r0
106         add     %rax,$d2
107          mov    $s1,%rax
108         adc     %rdx,$d3
109
110         mulq    $h1                     # h1*s1
111          mov    $h2,$h1                 # borrow $h1
112         add     %rax,$h0
113         adc     %rdx,$d1
114
115         imulq   $s1,$h1                 # h2*s1
116         add     $h1,$d2
117          mov    $d1,$h1
118         adc     \$0,$d3
119
120         imulq   $r0,$h2                 # h2*r0
121         add     $d2,$h1
122         mov     \$-4,%rax               # mask value
123         adc     $h2,$d3
124
125         and     $d3,%rax                # last reduction step
126         mov     $d3,$h2
127         shr     \$2,$d3
128         and     \$3,$h2
129         add     $d3,%rax
130         add     %rax,$h0
131         adc     \$0,$h1
132         adc     \$0,$h2
133 ___
134 }
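# The "last reduction step" above exploits 2^130 = 5 (mod 2^130-5): the top
# limb $d3 (weight 2^128) is split as ($d3 & 3) + 4*($d3>>2); the low two
# bits stay in $h2, while the remainder, worth ($d3>>2)*2^130, is folded into
# $h0 as 5*($d3>>2), computed branchlessly as ($d3 & -4) + ($d3 >> 2).  The
# result is therefore only partially reduced; the couple of spare bits left
# in $h2 are absorbed on the next pass.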
135
136 ########################################################################
137 # Layout of the opaque area is as follows.
138 #
139 #       unsigned __int64 h[3];          # current hash value base 2^64
140 #       unsigned __int64 r[2];          # key value base 2^64
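#
# For orientation, the code below implements the textbook Poly1305
# recurrence over 16-byte blocks m (a rough C-like sketch, illustrative
# only, with h kept in the three limbs above and r clamped in
# poly1305_init):
#
#       h   = h + m + (padbit << 128);          # poly1305_blocks
#       h   = (h * r) mod (2^130 - 5);
#       ...
#       tag = (h + nonce) mod 2^128;            # poly1305_emit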
141
142 $code.=<<___;
143 .text
144
145 .extern OPENSSL_ia32cap_P
146
147 .globl  poly1305_init
148 .hidden poly1305_init
149 .globl  poly1305_blocks
150 .hidden poly1305_blocks
151 .globl  poly1305_emit
152 .hidden poly1305_emit
153
154 .type   poly1305_init,\@function,3
155 .align  32
156 poly1305_init:
157         xor     %rax,%rax
158         mov     %rax,0($ctx)            # initialize hash value
159         mov     %rax,8($ctx)
160         mov     %rax,16($ctx)
161
162         cmp     \$0,$inp
163         je      .Lno_key
164
165         lea     poly1305_blocks(%rip),%r10
166         lea     poly1305_emit(%rip),%r11
167 ___
168 $code.=<<___    if ($avx);
169         mov     OPENSSL_ia32cap_P+4(%rip),%r9
170         lea     poly1305_blocks_avx(%rip),%rax
171         lea     poly1305_emit_avx(%rip),%rcx
172         bt      \$`60-32`,%r9           # AVX?
173         cmovc   %rax,%r10
174         cmovc   %rcx,%r11
175 ___
176 $code.=<<___    if ($avx>1);
177         lea     poly1305_blocks_avx2(%rip),%rax
178         bt      \$`5+32`,%r9            # AVX2?
179         cmovc   %rax,%r10
180 ___
181 $code.=<<___;
182         mov     \$0x0ffffffc0fffffff,%rax
183         mov     \$0x0ffffffc0ffffffc,%rcx
184         and     0($inp),%rax
185         and     8($inp),%rcx
186         mov     %rax,24($ctx)
187         mov     %rcx,32($ctx)
188 ___
189 $code.=<<___    if ($flavour !~ /elf32/);
190         mov     %r10,0(%rdx)
191         mov     %r11,8(%rdx)
192 ___
193 $code.=<<___    if ($flavour =~ /elf32/);
194         mov     %r10d,0(%rdx)
195         mov     %r11d,4(%rdx)
196 ___
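# The third poly1305_init argument (%rdx) is a two-entry function table that
# receives the poly1305_blocks/poly1305_emit implementations selected above;
# on ILP32 (elf32/x32) builds only the low 32 bits of each pointer are stored.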
197 $code.=<<___;
198         mov     \$1,%eax
199 .Lno_key:
200         ret
201 .size   poly1305_init,.-poly1305_init
202
203 .type   poly1305_blocks,\@function,4
204 .align  32
205 poly1305_blocks:
206 .Lblocks:
207         shr     \$4,$len
208         jz      .Lno_data               # too short
209
210         push    %rbx
211         push    %rbp
212         push    %r12
213         push    %r13
214         push    %r14
215         push    %r15
216 .Lblocks_body:
217
218         mov     $len,%r15               # reassign $len
219
220         mov     24($ctx),$r0            # load r
221         mov     32($ctx),$s1
222
223         mov     0($ctx),$h0             # load hash value
224         mov     8($ctx),$h1
225         mov     16($ctx),$h2
226
227         mov     $s1,$r1
228         shr     \$2,$s1
229         mov     $r1,%rax
230         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
231         jmp     .Loop
232
233 .align  32
234 .Loop:
235         add     0($inp),$h0             # accumulate input
236         adc     8($inp),$h1
237         lea     16($inp),$inp
238         adc     $padbit,$h2
239 ___
240         &poly1305_iteration();
241 $code.=<<___;
242         mov     $r1,%rax
243         dec     %r15                    # len-=16
244         jnz     .Loop
245
246         mov     $h0,0($ctx)             # store hash value
247         mov     $h1,8($ctx)
248         mov     $h2,16($ctx)
249
250         mov     0(%rsp),%r15
251         mov     8(%rsp),%r14
252         mov     16(%rsp),%r13
253         mov     24(%rsp),%r12
254         mov     32(%rsp),%rbp
255         mov     40(%rsp),%rbx
256         lea     48(%rsp),%rsp
257 .Lno_data:
258 .Lblocks_epilogue:
259         ret
260 .size   poly1305_blocks,.-poly1305_blocks
261
262 .type   poly1305_emit,\@function,3
263 .align  32
264 poly1305_emit:
265 .Lemit:
266         mov     0($ctx),%r8     # load hash value
267         mov     8($ctx),%r9
268         mov     16($ctx),%r10
269
270         mov     %r8,%rax
271         add     \$5,%r8         # compare to modulus
272         mov     %r9,%rcx
273         adc     \$0,%r9
274         adc     \$0,%r10
275         shr     \$2,%r10        # did 130-bit value overflow?
276         cmovnz  %r8,%rax
277         cmovnz  %r9,%rcx
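        # The sequence above performs the final reduction by conditional
        # selection: h+5 carries into bit 130 exactly when h >= 2^130-5, and
        # in that case the reduced value h+5-2^130 agrees with h+5 in its
        # low 128 bits, so the h+5 limbs are taken; otherwise h itself is
        # already fully reduced.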
278
279         add     0($nonce),%rax  # accumulate nonce
280         adc     8($nonce),%rcx
281         mov     %rax,0($mac)    # write result
282         mov     %rcx,8($mac)
283
284         ret
285 .size   poly1305_emit,.-poly1305_emit
286 ___
287 if ($avx) {
288
289 ########################################################################
290 # Layout of the opaque area is as follows.
291 #
292 #       unsigned __int32 h[5];          # current hash value base 2^26
293 #       unsigned __int32 is_base2_26;
294 #       unsigned __int64 r[2];          # key value base 2^64
295 #       unsigned __int64 pad;
296 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
297 #
298 # where r^n are the base 2^26 digits of the powers of the multiplier key.
299 # There are 5 digits, but the last four are interleaved with their multiples
300 # of 5, for a total of 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
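#
# The 5*r_i multiples are precomputed because, in the base 2^26 schoolbook
# multiplication, every partial product whose digit weights add up to 2^130
# or more wraps around modulo 2^130-5, i.e. it re-enters the low digits
# multiplied by 5 (see the d0..d4 formulas further down).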
301
302 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
303     map("%xmm$_",(0..15));
304
305 $code.=<<___;
306 .type   __poly1305_block,\@abi-omnipotent
307 .align  32
308 __poly1305_block:
309 ___
310         &poly1305_iteration();
311 $code.=<<___;
312         ret
313 .size   __poly1305_block,.-__poly1305_block
314
315 .type   __poly1305_init_avx,\@abi-omnipotent
316 .align  32
317 __poly1305_init_avx:
318         mov     $r0,$h0
319         mov     $r1,$h1
320         xor     $h2,$h2
321
322         lea     48+64($ctx),$ctx        # size optimization
323
324         mov     $r1,%rax
325         call    __poly1305_block        # r^2
326
327         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
328         mov     \$0x3ffffff,%edx
329         mov     $h0,$d1
330         and     $h0#d,%eax
331         mov     $r0,$d2
332         and     $r0#d,%edx
333         mov     %eax,`16*0+0-64`($ctx)
334         shr     \$26,$d1
335         mov     %edx,`16*0+4-64`($ctx)
336         shr     \$26,$d2
337
338         mov     \$0x3ffffff,%eax
339         mov     \$0x3ffffff,%edx
340         and     $d1#d,%eax
341         and     $d2#d,%edx
342         mov     %eax,`16*1+0-64`($ctx)
343         lea     (%rax,%rax,4),%eax      # *5
344         mov     %edx,`16*1+4-64`($ctx)
345         lea     (%rdx,%rdx,4),%edx      # *5
346         mov     %eax,`16*2+0-64`($ctx)
347         shr     \$26,$d1
348         mov     %edx,`16*2+4-64`($ctx)
349         shr     \$26,$d2
350
351         mov     $h1,%rax
352         mov     $r1,%rdx
353         shl     \$12,%rax
354         shl     \$12,%rdx
355         or      $d1,%rax
356         or      $d2,%rdx
357         and     \$0x3ffffff,%eax
358         and     \$0x3ffffff,%edx
359         mov     %eax,`16*3+0-64`($ctx)
360         lea     (%rax,%rax,4),%eax      # *5
361         mov     %edx,`16*3+4-64`($ctx)
362         lea     (%rdx,%rdx,4),%edx      # *5
363         mov     %eax,`16*4+0-64`($ctx)
364         mov     $h1,$d1
365         mov     %edx,`16*4+4-64`($ctx)
366         mov     $r1,$d2
367
368         mov     \$0x3ffffff,%eax
369         mov     \$0x3ffffff,%edx
370         shr     \$14,$d1
371         shr     \$14,$d2
372         and     $d1#d,%eax
373         and     $d2#d,%edx
374         mov     %eax,`16*5+0-64`($ctx)
375         lea     (%rax,%rax,4),%eax      # *5
376         mov     %edx,`16*5+4-64`($ctx)
377         lea     (%rdx,%rdx,4),%edx      # *5
378         mov     %eax,`16*6+0-64`($ctx)
379         shr     \$26,$d1
380         mov     %edx,`16*6+4-64`($ctx)
381         shr     \$26,$d2
382
383         mov     $h2,%rax
384         shl     \$24,%rax
385         or      %rax,$d1
386         mov     $d1#d,`16*7+0-64`($ctx)
387         lea     ($d1,$d1,4),$d1         # *5
388         mov     $d2#d,`16*7+4-64`($ctx)
389         lea     ($d2,$d2,4),$d2         # *5
390         mov     $d1#d,`16*8+0-64`($ctx)
391         mov     $d2#d,`16*8+4-64`($ctx)
392
393         mov     $r1,%rax
394         call    __poly1305_block        # r^3
395
396         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
397         mov     $h0,$d1
398         and     $h0#d,%eax
399         shr     \$26,$d1
400         mov     %eax,`16*0+12-64`($ctx)
401
402         mov     \$0x3ffffff,%edx
403         and     $d1#d,%edx
404         mov     %edx,`16*1+12-64`($ctx)
405         lea     (%rdx,%rdx,4),%edx      # *5
406         shr     \$26,$d1
407         mov     %edx,`16*2+12-64`($ctx)
408
409         mov     $h1,%rax
410         shl     \$12,%rax
411         or      $d1,%rax
412         and     \$0x3ffffff,%eax
413         mov     %eax,`16*3+12-64`($ctx)
414         lea     (%rax,%rax,4),%eax      # *5
415         mov     $h1,$d1
416         mov     %eax,`16*4+12-64`($ctx)
417
418         mov     \$0x3ffffff,%edx
419         shr     \$14,$d1
420         and     $d1#d,%edx
421         mov     %edx,`16*5+12-64`($ctx)
422         lea     (%rdx,%rdx,4),%edx      # *5
423         shr     \$26,$d1
424         mov     %edx,`16*6+12-64`($ctx)
425
426         mov     $h2,%rax
427         shl     \$24,%rax
428         or      %rax,$d1
429         mov     $d1#d,`16*7+12-64`($ctx)
430         lea     ($d1,$d1,4),$d1         # *5
431         mov     $d1#d,`16*8+12-64`($ctx)
432
433         mov     $r1,%rax
434         call    __poly1305_block        # r^4
435
436         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
437         mov     $h0,$d1
438         and     $h0#d,%eax
439         shr     \$26,$d1
440         mov     %eax,`16*0+8-64`($ctx)
441
442         mov     \$0x3ffffff,%edx
443         and     $d1#d,%edx
444         mov     %edx,`16*1+8-64`($ctx)
445         lea     (%rdx,%rdx,4),%edx      # *5
446         shr     \$26,$d1
447         mov     %edx,`16*2+8-64`($ctx)
448
449         mov     $h1,%rax
450         shl     \$12,%rax
451         or      $d1,%rax
452         and     \$0x3ffffff,%eax
453         mov     %eax,`16*3+8-64`($ctx)
454         lea     (%rax,%rax,4),%eax      # *5
455         mov     $h1,$d1
456         mov     %eax,`16*4+8-64`($ctx)
457
458         mov     \$0x3ffffff,%edx
459         shr     \$14,$d1
460         and     $d1#d,%edx
461         mov     %edx,`16*5+8-64`($ctx)
462         lea     (%rdx,%rdx,4),%edx      # *5
463         shr     \$26,$d1
464         mov     %edx,`16*6+8-64`($ctx)
465
466         mov     $h2,%rax
467         shl     \$24,%rax
468         or      %rax,$d1
469         mov     $d1#d,`16*7+8-64`($ctx)
470         lea     ($d1,$d1,4),$d1         # *5
471         mov     $d1#d,`16*8+8-64`($ctx)
472
473         lea     -48-64($ctx),$ctx       # size [de-]optimization
474         ret
475 .size   __poly1305_init_avx,.-__poly1305_init_avx
476
477 .type   poly1305_blocks_avx,\@function,4
478 .align  32
479 poly1305_blocks_avx:
480         mov     20($ctx),%r8d           # is_base2_26
481         cmp     \$128,$len
482         jae     .Lblocks_avx
483         test    %r8d,%r8d
484         jz      .Lblocks
485
486 .Lblocks_avx:
487         and     \$-16,$len
488         jz      .Lno_data_avx
489
490         vzeroupper
491
492         test    %r8d,%r8d
493         jz      .Lbase2_64_avx
494
495         test    \$31,$len
496         jz      .Leven_avx
497
498         push    %rbx
499         push    %rbp
500         push    %r12
501         push    %r13
502         push    %r14
503         push    %r15
504 .Lblocks_avx_body:
505
506         mov     $len,%r15               # reassign $len
507
508         mov     0($ctx),$d1             # load hash value
509         mov     8($ctx),$d2
510         mov     16($ctx),$h2#d
511
512         mov     24($ctx),$r0            # load r
513         mov     32($ctx),$s1
514
515         ################################# base 2^26 -> base 2^64
516         mov     $d1#d,$h0#d
517         and     \$`-1*(1<<31)`,$d1
518         mov     $d2,$r1                 # borrow $r1
519         mov     $d2#d,$h1#d
520         and     \$`-1*(1<<31)`,$d2
521
522         shr     \$6,$d1
523         shl     \$52,$r1
524         add     $d1,$h0
525         shr     \$12,$h1
526         shr     \$18,$d2
527         add     $r1,$h0
528         adc     $d2,$h1
529
530         mov     $h2,$d1
531         shl     \$40,$d1
532         shr     \$24,$h2
533         add     $d1,$h1
534         adc     \$0,$h2                 # can be partially reduced...
535
536         mov     \$-4,$d2                # ... so reduce
537         mov     $h2,$d1
538         and     $h2,$d2
539         shr     \$2,$d1
540         and     \$3,$h2
541         add     $d2,$d1                 # =*5
542         add     $d1,$h0
543         adc     \$0,$h1
544         adc     \$0,$h2
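        # In other words, the five 26-bit digits h[0..4] saved earlier were
        # repacked as
        #       h0 = h[0] | h[1]<<26 | h[2]<<52
        #       h1 = h[2]>>12 | h[3]<<14 | h[4]<<40
        #       h2 = h[4]>>24
        # and, since the saved digits may carry a small excess, h2 is reduced
        # once more via 2^130 = 5, exactly as in the scalar code.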
545
546         mov     $s1,$r1
547         mov     $s1,%rax
548         shr     \$2,$s1
549         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
550
551         add     0($inp),$h0             # accumulate input
552         adc     8($inp),$h1
553         lea     16($inp),$inp
554         adc     $padbit,$h2
555
556         call    __poly1305_block
557
558         test    $padbit,$padbit         # if $padbit is zero,
559         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
560
561         ################################# base 2^64 -> base 2^26
562         mov     $h0,%rax
563         mov     $h0,%rdx
564         shr     \$52,$h0
565         mov     $h1,$r0
566         mov     $h1,$r1
567         shr     \$26,%rdx
568         and     \$0x3ffffff,%rax        # h[0]
569         shl     \$12,$r0
570         and     \$0x3ffffff,%rdx        # h[1]
571         shr     \$14,$h1
572         or      $r0,$h0
573         shl     \$24,$h2
574         and     \$0x3ffffff,$h0         # h[2]
575         shr     \$40,$r1
576         and     \$0x3ffffff,$h1         # h[3]
577         or      $r1,$h2                 # h[4]
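        # i.e. the 130-bit value is split back into five 26-bit digits:
        #       h[0] =  h0      & 0x3ffffff     h[1] = (h0>>26) & 0x3ffffff
        #       h[2] = (h0>>52 | h1<<12) & 0x3ffffff
        #       h[3] = (h1>>14) & 0x3ffffff     h[4] =  h1>>40  | h2<<24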
578
579         sub     \$16,%r15
580         jz      .Lstore_base2_26_avx
581
582         vmovd   %rax#d,$H0
583         vmovd   %rdx#d,$H1
584         vmovd   $h0#d,$H2
585         vmovd   $h1#d,$H3
586         vmovd   $h2#d,$H4
587         jmp     .Lproceed_avx
588
589 .align  32
590 .Lstore_base2_64_avx:
591         mov     $h0,0($ctx)
592         mov     $h1,8($ctx)
593         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
594         jmp     .Ldone_avx
595
596 .align  16
597 .Lstore_base2_26_avx:
598         mov     %rax#d,0($ctx)          # store hash value base 2^26
599         mov     %rdx#d,4($ctx)
600         mov     $h0#d,8($ctx)
601         mov     $h1#d,12($ctx)
602         mov     $h2#d,16($ctx)
603 .align  16
604 .Ldone_avx:
605         mov     0(%rsp),%r15
606         mov     8(%rsp),%r14
607         mov     16(%rsp),%r13
608         mov     24(%rsp),%r12
609         mov     32(%rsp),%rbp
610         mov     40(%rsp),%rbx
611         lea     48(%rsp),%rsp
612 .Lno_data_avx:
613 .Lblocks_avx_epilogue:
614         ret
615
616 .align  32
617 .Lbase2_64_avx:
618         push    %rbx
619         push    %rbp
620         push    %r12
621         push    %r13
622         push    %r14
623         push    %r15
624 .Lbase2_64_avx_body:
625
626         mov     $len,%r15               # reassign $len
627
628         mov     24($ctx),$r0            # load r
629         mov     32($ctx),$s1
630
631         mov     0($ctx),$h0             # load hash value
632         mov     8($ctx),$h1
633         mov     16($ctx),$h2#d
634
635         mov     $s1,$r1
636         mov     $s1,%rax
637         shr     \$2,$s1
638         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
639
640         test    \$31,$len
641         jz      .Linit_avx
642
643         add     0($inp),$h0             # accumulate input
644         adc     8($inp),$h1
645         lea     16($inp),$inp
646         adc     $padbit,$h2
647         sub     \$16,%r15
648
649         call    __poly1305_block
650
651 .Linit_avx:
652         ################################# base 2^64 -> base 2^26
653         mov     $h0,%rax
654         mov     $h0,%rdx
655         shr     \$52,$h0
656         mov     $h1,$d1
657         mov     $h1,$d2
658         shr     \$26,%rdx
659         and     \$0x3ffffff,%rax        # h[0]
660         shl     \$12,$d1
661         and     \$0x3ffffff,%rdx        # h[1]
662         shr     \$14,$h1
663         or      $d1,$h0
664         shl     \$24,$h2
665         and     \$0x3ffffff,$h0         # h[2]
666         shr     \$40,$d2
667         and     \$0x3ffffff,$h1         # h[3]
668         or      $d2,$h2                 # h[4]
669
670         vmovd   %rax#d,$H0
671         vmovd   %rdx#d,$H1
672         vmovd   $h0#d,$H2
673         vmovd   $h1#d,$H3
674         vmovd   $h2#d,$H4
675         movl    \$1,20($ctx)            # set is_base2_26
676
677         call    __poly1305_init_avx
678
679 .Lproceed_avx:
680         mov     %r15,$len
681
682         mov     0(%rsp),%r15
683         mov     8(%rsp),%r14
684         mov     16(%rsp),%r13
685         mov     24(%rsp),%r12
686         mov     32(%rsp),%rbp
687         mov     40(%rsp),%rbx
688         lea     48(%rsp),%rax
689         lea     48(%rsp),%rsp
690 .Lbase2_64_avx_epilogue:
691         jmp     .Ldo_avx
692
693 .align  32
694 .Leven_avx:
695         vmovd           4*0($ctx),$H0           # load hash value
696         vmovd           4*1($ctx),$H1
697         vmovd           4*2($ctx),$H2
698         vmovd           4*3($ctx),$H3
699         vmovd           4*4($ctx),$H4
700
701 .Ldo_avx:
702 ___
703 $code.=<<___    if (!$win64);
704         lea             -0x58(%rsp),%r11
705         sub             \$0x178,%rsp
706 ___
707 $code.=<<___    if ($win64);
708         lea             -0xf8(%rsp),%r11
709         sub             \$0x218,%rsp
710         vmovdqa         %xmm6,0x50(%r11)
711         vmovdqa         %xmm7,0x60(%r11)
712         vmovdqa         %xmm8,0x70(%r11)
713         vmovdqa         %xmm9,0x80(%r11)
714         vmovdqa         %xmm10,0x90(%r11)
715         vmovdqa         %xmm11,0xa0(%r11)
716         vmovdqa         %xmm12,0xb0(%r11)
717         vmovdqa         %xmm13,0xc0(%r11)
718         vmovdqa         %xmm14,0xd0(%r11)
719         vmovdqa         %xmm15,0xe0(%r11)
720 .Ldo_avx_body:
721 ___
722 $code.=<<___;
723         sub             \$64,$len
724         lea             -32($inp),%rax
725         cmovc           %rax,$inp
726
727         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
728         lea             `16*3+64`($ctx),$ctx    # size optimization
729         lea             .Lconst(%rip),%rcx
730
731         ################################################################
732         # load input
733         vmovdqu         16*2($inp),$T0
734         vmovdqu         16*3($inp),$T1
735         vmovdqa         64(%rcx),$MASK          # .Lmask26
736
737         vpsrldq         \$6,$T0,$T2             # splat input
738         vpsrldq         \$6,$T1,$T3
739         vpunpckhqdq     $T1,$T0,$T4             # 4
740         vpunpcklqdq     $T1,$T0,$T0             # 0:1
741         vpunpcklqdq     $T3,$T2,$T3             # 2:3
742
743         vpsrlq          \$40,$T4,$T4            # 4
744         vpsrlq          \$26,$T0,$T1
745         vpand           $MASK,$T0,$T0           # 0
746         vpsrlq          \$4,$T3,$T2
747         vpand           $MASK,$T1,$T1           # 1
748         vpsrlq          \$30,$T3,$T3
749         vpand           $MASK,$T2,$T2           # 2
750         vpand           $MASK,$T3,$T3           # 3
751         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
752
753         jbe             .Lskip_loop_avx
754
755         # expand and copy pre-calculated table to stack
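        # Each 16-byte table entry holds one base 2^26 digit of r^2, r^1,
        # r^4 and r^3 as four 32-bit words.  vpshufd with 0x44 replicates
        # the r^2 digit into the low word of both 64-bit lanes (used for the
        # newest pair of blocks each iteration), while 0xEE does the same
        # for the r^4 digit (used for the accumulated hash); vpmuludq only
        # reads the low 32 bits of each lane.  The r^4 copies are parked
        # below %r11, the r^2 copies in the frame at %rsp.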
756         vmovdqu         `16*1-64`($ctx),$D1
757         vmovdqu         `16*2-64`($ctx),$D2
758         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
759         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
760         vmovdqa         $D3,-0x90(%r11)
761         vmovdqa         $D0,0x00(%rsp)
762         vpshufd         \$0xEE,$D1,$D4
763         vmovdqu         `16*3-64`($ctx),$D0
764         vpshufd         \$0x44,$D1,$D1
765         vmovdqa         $D4,-0x80(%r11)
766         vmovdqa         $D1,0x10(%rsp)
767         vpshufd         \$0xEE,$D2,$D3
768         vmovdqu         `16*4-64`($ctx),$D1
769         vpshufd         \$0x44,$D2,$D2
770         vmovdqa         $D3,-0x70(%r11)
771         vmovdqa         $D2,0x20(%rsp)
772         vpshufd         \$0xEE,$D0,$D4
773         vmovdqu         `16*5-64`($ctx),$D2
774         vpshufd         \$0x44,$D0,$D0
775         vmovdqa         $D4,-0x60(%r11)
776         vmovdqa         $D0,0x30(%rsp)
777         vpshufd         \$0xEE,$D1,$D3
778         vmovdqu         `16*6-64`($ctx),$D0
779         vpshufd         \$0x44,$D1,$D1
780         vmovdqa         $D3,-0x50(%r11)
781         vmovdqa         $D1,0x40(%rsp)
782         vpshufd         \$0xEE,$D2,$D4
783         vmovdqu         `16*7-64`($ctx),$D1
784         vpshufd         \$0x44,$D2,$D2
785         vmovdqa         $D4,-0x40(%r11)
786         vmovdqa         $D2,0x50(%rsp)
787         vpshufd         \$0xEE,$D0,$D3
788         vmovdqu         `16*8-64`($ctx),$D2
789         vpshufd         \$0x44,$D0,$D0
790         vmovdqa         $D3,-0x30(%r11)
791         vmovdqa         $D0,0x60(%rsp)
792         vpshufd         \$0xEE,$D1,$D4
793         vpshufd         \$0x44,$D1,$D1
794         vmovdqa         $D4,-0x20(%r11)
795         vmovdqa         $D1,0x70(%rsp)
796         vpshufd         \$0xEE,$D2,$D3
797          vmovdqa        0x00(%rsp),$D4          # preload r0^2
798         vpshufd         \$0x44,$D2,$D2
799         vmovdqa         $D3,-0x10(%r11)
800         vmovdqa         $D2,0x80(%rsp)
801
802         jmp             .Loop_avx
803
804 .align  32
805 .Loop_avx:
806         ################################################################
807         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
808         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
809         #   \___________________/
810         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
811         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
812         #   \___________________/ \____________________/
813         #
814         # Note that we start with inp[2:3]*r^2. This is because it
815         # doesn't depend on the reduction in the previous iteration.
816         ################################################################
817         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
818         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
819         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
820         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
821         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
822         #
823         # though note that $Tx and $Hx are "reversed" in this section,
824         # and $D4 is preloaded with r0^2...
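        # The s_i = 5*r_i factors appear wherever the digit weights of a
        # partial product add up to 2^130 or more: e.g. in d1 the products
        # h4*r2, h3*r3 and h2*r4 all carry weight 2^(26*6) = 2^26 * 2^130,
        # and 2^130 = 5 (mod 2^130-5) folds them back to weight 2^26 as
        # h4*5*r2 + h3*5*r3 + h2*5*r4.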
825
826         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
827         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
828           vmovdqa       $H2,0x20(%r11)                          # offload hash
829         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
830          vmovdqa        0x10(%rsp),$H2          # r1^2
831         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
832         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
833
834           vmovdqa       $H0,0x00(%r11)                          #
835         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
836           vmovdqa       $H1,0x10(%r11)                          #
837         vpmuludq        $T3,$H2,$H1             # h3*r1
838         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
839         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
840           vmovdqa       $H3,0x30(%r11)                          #
841         vpmuludq        $T2,$H2,$H0             # h2*r1
842         vpmuludq        $T1,$H2,$H1             # h1*r1
843         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
844          vmovdqa        0x30(%rsp),$H3          # r2^2
845         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
846           vmovdqa       $H4,0x40(%r11)                          #
847         vpmuludq        $T0,$H2,$H2             # h0*r1
848          vpmuludq       $T2,$H3,$H0             # h2*r2
849         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
850
851          vmovdqa        0x40(%rsp),$H4          # s2^2
852         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
853         vpmuludq        $T1,$H3,$H1             # h1*r2
854         vpmuludq        $T0,$H3,$H3             # h0*r2
855         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
856          vmovdqa        0x50(%rsp),$H2          # r3^2
857         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
858         vpmuludq        $T4,$H4,$H0             # h4*s2
859         vpmuludq        $T3,$H4,$H4             # h3*s2
860         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
861          vmovdqa        0x60(%rsp),$H3          # s3^2
862         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
863
864          vmovdqa        0x80(%rsp),$H4          # s4^2
865         vpmuludq        $T1,$H2,$H1             # h1*r3
866         vpmuludq        $T0,$H2,$H2             # h0*r3
867         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
868         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
869         vpmuludq        $T4,$H3,$H0             # h4*s3
870         vpmuludq        $T3,$H3,$H1             # h3*s3
871         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
872          vmovdqu        16*0($inp),$H0                          # load input
873         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
874         vpmuludq        $T2,$H3,$H3             # h2*s3
875          vpmuludq       $T2,$H4,$T2             # h2*s4
876         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
877
878          vmovdqu        16*1($inp),$H1                          #
879         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
880         vpmuludq        $T3,$H4,$T3             # h3*s4
881         vpmuludq        $T4,$H4,$T4             # h4*s4
882          vpsrldq        \$6,$H0,$H2                             # splat input
883         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
884         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
885          vpsrldq        \$6,$H1,$H3                             #
886         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
887         vpmuludq        $T1,$H4,$T0             # h1*s4
888          vpunpckhqdq    $H1,$H0,$H4             # 4
889         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
890          vmovdqa        -0x90(%r11),$T4         # r0^4
891         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
892
893         vpunpcklqdq     $H1,$H0,$H0             # 0:1
894         vpunpcklqdq     $H3,$H2,$H3             # 2:3
895
896         #vpsrlq         \$40,$H4,$H4            # 4
897         vpsrldq         \$`40/8`,$H4,$H4        # 4
898         vpsrlq          \$26,$H0,$H1
899         vpand           $MASK,$H0,$H0           # 0
900         vpsrlq          \$4,$H3,$H2
901         vpand           $MASK,$H1,$H1           # 1
902         vpand           0(%rcx),$H4,$H4         # .Lmask24
903         vpsrlq          \$30,$H3,$H3
904         vpand           $MASK,$H2,$H2           # 2
905         vpand           $MASK,$H3,$H3           # 3
906         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
907
908         vpaddq          0x00(%r11),$H0,$H0      # add hash value
909         vpaddq          0x10(%r11),$H1,$H1
910         vpaddq          0x20(%r11),$H2,$H2
911         vpaddq          0x30(%r11),$H3,$H3
912         vpaddq          0x40(%r11),$H4,$H4
913
914         lea             16*2($inp),%rax
915         lea             16*4($inp),$inp
916         sub             \$64,$len
917         cmovc           %rax,$inp
918
919         ################################################################
920         # Now we accumulate (inp[0:1]+hash)*r^4
921         ################################################################
922         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
923         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
924         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
925         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
926         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
927
928         vpmuludq        $H0,$T4,$T0             # h0*r0
929         vpmuludq        $H1,$T4,$T1             # h1*r0
930         vpaddq          $T0,$D0,$D0
931         vpaddq          $T1,$D1,$D1
932          vmovdqa        -0x80(%r11),$T2         # r1^4
933         vpmuludq        $H2,$T4,$T0             # h2*r0
934         vpmuludq        $H3,$T4,$T1             # h3*r0
935         vpaddq          $T0,$D2,$D2
936         vpaddq          $T1,$D3,$D3
937         vpmuludq        $H4,$T4,$T4             # h4*r0
938          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
939         vpaddq          $T4,$D4,$D4
940
941         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
942         vpmuludq        $H2,$T2,$T1             # h2*r1
943         vpmuludq        $H3,$T2,$T0             # h3*r1
944         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
945          vmovdqa        -0x60(%r11),$T3         # r2^4
946         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
947         vpmuludq        $H1,$T2,$T1             # h1*r1
948         vpmuludq        $H0,$T2,$T2             # h0*r1
949         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
950         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
951
952          vmovdqa        -0x50(%r11),$T4         # s2^4
953         vpmuludq        $H2,$T3,$T0             # h2*r2
954         vpmuludq        $H1,$T3,$T1             # h1*r2
955         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
956         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
957          vmovdqa        -0x40(%r11),$T2         # r3^4
958         vpmuludq        $H0,$T3,$T3             # h0*r2
959         vpmuludq        $H4,$T4,$T0             # h4*s2
960         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
961         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
962          vmovdqa        -0x30(%r11),$T3         # s3^4
963         vpmuludq        $H3,$T4,$T4             # h3*s2
964          vpmuludq       $H1,$T2,$T1             # h1*r3
965         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
966
967          vmovdqa        -0x10(%r11),$T4         # s4^4
968         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
969         vpmuludq        $H0,$T2,$T2             # h0*r3
970         vpmuludq        $H4,$T3,$T0             # h4*s3
971         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
972         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
973          vmovdqu        16*2($inp),$T0                          # load input
974         vpmuludq        $H3,$T3,$T2             # h3*s3
975         vpmuludq        $H2,$T3,$T3             # h2*s3
976         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
977          vmovdqu        16*3($inp),$T1                          #
978         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
979
980         vpmuludq        $H2,$T4,$H2             # h2*s4
981         vpmuludq        $H3,$T4,$H3             # h3*s4
982          vpsrldq        \$6,$T0,$T2                             # splat input
983         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
984         vpmuludq        $H4,$T4,$H4             # h4*s4
985          vpsrldq        \$6,$T1,$T3                             #
986         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
987         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
988         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
989         vpmuludq        $H1,$T4,$H0
990          vpunpckhqdq    $T1,$T0,$T4             # 4
991         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
992         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
993
994         vpunpcklqdq     $T1,$T0,$T0             # 0:1
995         vpunpcklqdq     $T3,$T2,$T3             # 2:3
996
997         #vpsrlq         \$40,$T4,$T4            # 4
998         vpsrldq         \$`40/8`,$T4,$T4        # 4
999         vpsrlq          \$26,$T0,$T1
1000          vmovdqa        0x00(%rsp),$D4          # preload r0^2
1001         vpand           $MASK,$T0,$T0           # 0
1002         vpsrlq          \$4,$T3,$T2
1003         vpand           $MASK,$T1,$T1           # 1
1004         vpand           0(%rcx),$T4,$T4         # .Lmask24
1005         vpsrlq          \$30,$T3,$T3
1006         vpand           $MASK,$T2,$T2           # 2
1007         vpand           $MASK,$T3,$T3           # 3
1008         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1009
1010         ################################################################
1011         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
1012         # and P. Schwabe
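        # "Lazy" means the carries below are propagated just far enough to
        # keep every digit at 26 bits plus a small excess, rather than to a
        # canonical value; the 64-bit lanes leave ample headroom for that.
        # Note that the h4 -> h0 carry is multiplied by 5 on the way (the
        # carry is added, then added again shifted left by 2, i.e. x + 4*x),
        # again because 2^130 = 5 (mod 2^130-5).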
1013
1014         vpsrlq          \$26,$H3,$D3
1015         vpand           $MASK,$H3,$H3
1016         vpaddq          $D3,$H4,$H4             # h3 -> h4
1017
1018         vpsrlq          \$26,$H0,$D0
1019         vpand           $MASK,$H0,$H0
1020         vpaddq          $D0,$D1,$H1             # h0 -> h1
1021
1022         vpsrlq          \$26,$H4,$D0
1023         vpand           $MASK,$H4,$H4
1024
1025         vpsrlq          \$26,$H1,$D1
1026         vpand           $MASK,$H1,$H1
1027         vpaddq          $D1,$H2,$H2             # h1 -> h2
1028
1029         vpaddq          $D0,$H0,$H0
1030         vpsllq          \$2,$D0,$D0
1031         vpaddq          $D0,$H0,$H0             # h4 -> h0
1032
1033         vpsrlq          \$26,$H2,$D2
1034         vpand           $MASK,$H2,$H2
1035         vpaddq          $D2,$H3,$H3             # h2 -> h3
1036
1037         vpsrlq          \$26,$H0,$D0
1038         vpand           $MASK,$H0,$H0
1039         vpaddq          $D0,$H1,$H1             # h0 -> h1
1040
1041         vpsrlq          \$26,$H3,$D3
1042         vpand           $MASK,$H3,$H3
1043         vpaddq          $D3,$H4,$H4             # h3 -> h4
1044
1045         ja              .Loop_avx
1046
1047 .Lskip_loop_avx:
1048         ################################################################
1049         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1050
1051         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1052         add             \$32,$len
1053         jnz             .Long_tail_avx
1054
1055         vpaddq          $H2,$T2,$T2
1056         vpaddq          $H0,$T0,$T0
1057         vpaddq          $H1,$T1,$T1
1058         vpaddq          $H3,$T3,$T3
1059         vpaddq          $H4,$T4,$T4
1060
1061 .Long_tail_avx:
1062         vmovdqa         $H2,0x20(%r11)
1063         vmovdqa         $H0,0x00(%r11)
1064         vmovdqa         $H1,0x10(%r11)
1065         vmovdqa         $H3,0x30(%r11)
1066         vmovdqa         $H4,0x40(%r11)
1067
1068         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1069         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1070         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1071         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1072         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1073
1074         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1075         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1076          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1077         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1078         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1079         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1080
1081         vpmuludq        $T3,$H2,$H0             # h3*r1
1082         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1083          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1084         vpmuludq        $T2,$H2,$H1             # h2*r1
1085         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1086          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1087         vpmuludq        $T1,$H2,$H0             # h1*r1
1088         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1089         vpmuludq        $T0,$H2,$H2             # h0*r1
1090         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1091         vpmuludq        $T4,$H3,$H3             # h4*s1
1092         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1093
1094          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1095         vpmuludq        $T2,$H4,$H1             # h2*r2
1096         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1097         vpmuludq        $T1,$H4,$H0             # h1*r2
1098         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1099          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1100         vpmuludq        $T0,$H4,$H4             # h0*r2
1101         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1102         vpmuludq        $T4,$H2,$H1             # h4*s2
1103         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1104          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1105         vpmuludq        $T3,$H2,$H2             # h3*s2
1106         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1107
1108         vpmuludq        $T1,$H3,$H0             # h1*r3
1109         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1110         vpmuludq        $T0,$H3,$H3             # h0*r3
1111         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1112          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1113         vpmuludq        $T4,$H4,$H1             # h4*s3
1114         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1115          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1116         vpmuludq        $T3,$H4,$H0             # h3*s3
1117         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1118         vpmuludq        $T2,$H4,$H4             # h2*s3
1119         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1120
1121         vpmuludq        $T0,$H2,$H2             # h0*r4
1122         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1123         vpmuludq        $T4,$H3,$H1             # h4*s4
1124         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1125         vpmuludq        $T3,$H3,$H0             # h3*s4
1126         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1127         vpmuludq        $T2,$H3,$H1             # h2*s4
1128         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1129         vpmuludq        $T1,$H3,$H3             # h1*s4
1130         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1131
1132         jz              .Lshort_tail_avx
1133
1134         vmovdqu         16*0($inp),$H0          # load input
1135         vmovdqu         16*1($inp),$H1
1136
1137         vpsrldq         \$6,$H0,$H2             # splat input
1138         vpsrldq         \$6,$H1,$H3
1139         vpunpckhqdq     $H1,$H0,$H4             # 4
1140         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1141         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1142
1143         vpsrlq          \$40,$H4,$H4            # 4
1144         vpsrlq          \$26,$H0,$H1
1145         vpand           $MASK,$H0,$H0           # 0
1146         vpsrlq          \$4,$H3,$H2
1147         vpand           $MASK,$H1,$H1           # 1
1148         vpsrlq          \$30,$H3,$H3
1149         vpand           $MASK,$H2,$H2           # 2
1150         vpand           $MASK,$H3,$H3           # 3
1151         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1152
1153         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1154         vpaddq          0x00(%r11),$H0,$H0
1155         vpaddq          0x10(%r11),$H1,$H1
1156         vpaddq          0x20(%r11),$H2,$H2
1157         vpaddq          0x30(%r11),$H3,$H3
1158         vpaddq          0x40(%r11),$H4,$H4
1159
1160         ################################################################
1161         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1162
1163         vpmuludq        $H0,$T4,$T0             # h0*r0
1164         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1165         vpmuludq        $H1,$T4,$T1             # h1*r0
1166         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1167         vpmuludq        $H2,$T4,$T0             # h2*r0
1168         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1169          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1170         vpmuludq        $H3,$T4,$T1             # h3*r0
1171         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1172         vpmuludq        $H4,$T4,$T4             # h4*r0
1173         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1174
1175         vpmuludq        $H3,$T2,$T0             # h3*r1
1176         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1177          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1178         vpmuludq        $H2,$T2,$T1             # h2*r1
1179         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1180          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1181         vpmuludq        $H1,$T2,$T0             # h1*r1
1182         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1183         vpmuludq        $H0,$T2,$T2             # h0*r1
1184         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1185         vpmuludq        $H4,$T3,$T3             # h4*s1
1186         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1187
1188          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1189         vpmuludq        $H2,$T4,$T1             # h2*r2
1190         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1191         vpmuludq        $H1,$T4,$T0             # h1*r2
1192         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1193          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1194         vpmuludq        $H0,$T4,$T4             # h0*r2
1195         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1196         vpmuludq        $H4,$T2,$T1             # h4*s2
1197         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1198          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1199         vpmuludq        $H3,$T2,$T2             # h3*s2
1200         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1201
1202         vpmuludq        $H1,$T3,$T0             # h1*r3
1203         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1204         vpmuludq        $H0,$T3,$T3             # h0*r3
1205         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1206          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1207         vpmuludq        $H4,$T4,$T1             # h4*s3
1208         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1209          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1210         vpmuludq        $H3,$T4,$T0             # h3*s3
1211         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1212         vpmuludq        $H2,$T4,$T4             # h2*s3
1213         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1214
1215         vpmuludq        $H0,$T2,$T2             # h0*r4
1216         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1217         vpmuludq        $H4,$T3,$T1             # h4*s4
1218         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1219         vpmuludq        $H3,$T3,$T0             # h3*s4
1220         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1221         vpmuludq        $H2,$T3,$T1             # h2*s4
1222         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1223         vpmuludq        $H1,$T3,$T3             # h1*s4
1224         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1225
1226 .Lshort_tail_avx:
1227         ################################################################
1228         # horizontal addition
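        # Each 128-bit accumulator still holds two independent 64-bit sums,
        # one per interleaved lane; shifting the high lane down by 8 bytes
        # and adding folds the two lanes into a single per-digit total.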
1229
1230         vpsrldq         \$8,$D4,$T4
1231         vpsrldq         \$8,$D3,$T3
1232         vpsrldq         \$8,$D1,$T1
1233         vpsrldq         \$8,$D0,$T0
1234         vpsrldq         \$8,$D2,$T2
1235         vpaddq          $T3,$D3,$D3
1236         vpaddq          $T4,$D4,$D4
1237         vpaddq          $T0,$D0,$D0
1238         vpaddq          $T1,$D1,$D1
1239         vpaddq          $T2,$D2,$D2
1240
1241         ################################################################
1242         # lazy reduction
1243
1244         vpsrlq          \$26,$D3,$H3
1245         vpand           $MASK,$D3,$D3
1246         vpaddq          $H3,$D4,$D4             # h3 -> h4
1247
1248         vpsrlq          \$26,$D0,$H0
1249         vpand           $MASK,$D0,$D0
1250         vpaddq          $H0,$D1,$D1             # h0 -> h1
1251
1252         vpsrlq          \$26,$D4,$H4
1253         vpand           $MASK,$D4,$D4
1254
1255         vpsrlq          \$26,$D1,$H1
1256         vpand           $MASK,$D1,$D1
1257         vpaddq          $H1,$D2,$D2             # h1 -> h2
1258
1259         vpaddq          $H4,$D0,$D0
1260         vpsllq          \$2,$H4,$H4
1261         vpaddq          $H4,$D0,$D0             # h4 -> h0
1262
1263         vpsrlq          \$26,$D2,$H2
1264         vpand           $MASK,$D2,$D2
1265         vpaddq          $H2,$D3,$D3             # h2 -> h3
1266
1267         vpsrlq          \$26,$D0,$H0
1268         vpand           $MASK,$D0,$D0
1269         vpaddq          $H0,$D1,$D1             # h0 -> h1
1270
1271         vpsrlq          \$26,$D3,$H3
1272         vpand           $MASK,$D3,$D3
1273         vpaddq          $H3,$D4,$D4             # h3 -> h4
1274
1275         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1276         vmovd           $D1,`4*1-48-64`($ctx)
1277         vmovd           $D2,`4*2-48-64`($ctx)
1278         vmovd           $D3,`4*3-48-64`($ctx)
1279         vmovd           $D4,`4*4-48-64`($ctx)
1280 ___
1281 $code.=<<___    if ($win64);
1282         vmovdqa         0x50(%r11),%xmm6
1283         vmovdqa         0x60(%r11),%xmm7
1284         vmovdqa         0x70(%r11),%xmm8
1285         vmovdqa         0x80(%r11),%xmm9
1286         vmovdqa         0x90(%r11),%xmm10
1287         vmovdqa         0xa0(%r11),%xmm11
1288         vmovdqa         0xb0(%r11),%xmm12
1289         vmovdqa         0xc0(%r11),%xmm13
1290         vmovdqa         0xd0(%r11),%xmm14
1291         vmovdqa         0xe0(%r11),%xmm15
1292         lea             0xf8(%r11),%rsp
1293 .Ldo_avx_epilogue:
1294 ___
1295 $code.=<<___    if (!$win64);
1296         lea             0x58(%r11),%rsp
1297 ___
1298 $code.=<<___;
1299         vzeroupper
1300         ret
1301 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1302
1303 .type   poly1305_emit_avx,\@function,3
1304 .align  32
1305 poly1305_emit_avx:
1306         cmpl    \$0,20($ctx)    # is_base2_26?
1307         je      .Lemit
1308
1309         mov     0($ctx),%eax    # load hash value base 2^26
1310         mov     4($ctx),%ecx
1311         mov     8($ctx),%r8d
1312         mov     12($ctx),%r11d
1313         mov     16($ctx),%r10d
1314
1315         shl     \$26,%rcx       # base 2^26 -> base 2^64
1316         mov     %r8,%r9
1317         shl     \$52,%r8
1318         add     %rcx,%rax
1319         shr     \$12,%r9
1320         add     %rax,%r8        # h0
1321         adc     \$0,%r9
1322
1323         shl     \$14,%r11
1324         mov     %r10,%rax
1325         shr     \$24,%r10
1326         add     %r11,%r9
1327         shl     \$40,%rax
1328         add     %rax,%r9        # h1
1329         adc     \$0,%r10        # h2
1330
1331         mov     %r10,%rax       # could be partially reduced, so reduce
1332         mov     %r10,%rcx
1333         and     \$3,%r10
1334         shr     \$2,%rax
1335         and     \$-4,%rcx
1336         add     %rcx,%rax
1337         add     %rax,%r8
1338         adc     \$0,%r9
1339         adc     \$0,%r10
1340
1341         mov     %r8,%rax
1342         add     \$5,%r8         # compare to modulus
1343         mov     %r9,%rcx
1344         adc     \$0,%r9
1345         adc     \$0,%r10
1346         shr     \$2,%r10        # did 130-bit value overflow?
1347         cmovnz  %r8,%rax
1348         cmovnz  %r9,%rcx
1349
1350         add     0($nonce),%rax  # accumulate nonce
1351         adc     8($nonce),%rcx
1352         mov     %rax,0($mac)    # write result
1353         mov     %rcx,8($mac)
1354
1355         ret
1356 .size   poly1305_emit_avx,.-poly1305_emit_avx
1357 ___
1358
1359 if ($avx>1) {
1360 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1361     map("%ymm$_",(0..15));
1362 my $S4=$MASK;
1363
1364 $code.=<<___;
1365 .type   poly1305_blocks_avx2,\@function,4
1366 .align  32
1367 poly1305_blocks_avx2:
1368         mov     20($ctx),%r8d           # is_base2_26
1369         cmp     \$128,$len
1370         jae     .Lblocks_avx2
1371         test    %r8d,%r8d
1372         jz      .Lblocks
1373
1374 .Lblocks_avx2:
1375         and     \$-16,$len
1376         jz      .Lno_data_avx2
1377
1378         vzeroupper
1379
1380         test    %r8d,%r8d
1381         jz      .Lbase2_64_avx2
1382
1383         test    \$63,$len
1384         jz      .Leven_avx2
1385
1386         push    %rbx
1387         push    %rbp
1388         push    %r12
1389         push    %r13
1390         push    %r14
1391         push    %r15
1392 .Lblocks_avx2_body:
1393
1394         mov     $len,%r15               # reassign $len
1395
1396         mov     0($ctx),$d1             # load hash value
1397         mov     8($ctx),$d2
1398         mov     16($ctx),$h2#d
1399
1400         mov     24($ctx),$r0            # load r
1401         mov     32($ctx),$s1
1402
1403         ################################# base 2^26 -> base 2^64
1404         mov     $d1#d,$h0#d
1405         and     \$`-1*(1<<31)`,$d1
1406         mov     $d2,$r1                 # borrow $r1
1407         mov     $d2#d,$h1#d
1408         and     \$`-1*(1<<31)`,$d2
1409
1410         shr     \$6,$d1
1411         shl     \$52,$r1
1412         add     $d1,$h0
1413         shr     \$12,$h1
1414         shr     \$18,$d2
1415         add     $r1,$h0
1416         adc     $d2,$h1
1417
1418         mov     $h2,$d1
1419         shl     \$40,$d1
1420         shr     \$24,$h2
1421         add     $d1,$h1
1422         adc     \$0,$h2                 # can be partially reduced...
1423
1424         mov     \$-4,$d2                # ... so reduce
1425         mov     $h2,$d1
1426         and     $h2,$d2
1427         shr     \$2,$d1
1428         and     \$3,$h2
1429         add     $d2,$d1                 # =*5
1430         add     $d1,$h0
1431         adc     \$0,$h1
1432         adc     \$0,$h2
1433
1434         mov     $s1,$r1
1435         mov     $s1,%rax
1436         shr     \$2,$s1
1437         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1438
1439 .Lbase2_26_pre_avx2:
1440         add     0($inp),$h0             # accumulate input
1441         adc     8($inp),$h1
1442         lea     16($inp),$inp
1443         adc     $padbit,$h2
1444         sub     \$16,%r15
1445
1446         call    __poly1305_block
1447         mov     $r1,%rax
1448
1449         test    \$63,%r15
1450         jnz     .Lbase2_26_pre_avx2
1451
1452         test    $padbit,$padbit         # if $padbit is zero,
1453         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1454
1455         ################################# base 2^64 -> base 2^26
1456         mov     $h0,%rax
1457         mov     $h0,%rdx
1458         shr     \$52,$h0
1459         mov     $h1,$r0
1460         mov     $h1,$r1
1461         shr     \$26,%rdx
1462         and     \$0x3ffffff,%rax        # h[0]
1463         shl     \$12,$r0
1464         and     \$0x3ffffff,%rdx        # h[1]
1465         shr     \$14,$h1
1466         or      $r0,$h0
1467         shl     \$24,$h2
1468         and     \$0x3ffffff,$h0         # h[2]
1469         shr     \$40,$r1
1470         and     \$0x3ffffff,$h1         # h[3]
1471         or      $r1,$h2                 # h[4]
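        # i.e. h = h[0] + h[1]*2^26 + h[2]*2^52 + h[3]*2^78 + h[4]*2^104,
        # with h[0..3] masked to 26 bits and h[4] holding whatever remains on top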
1472
1473         test    %r15,%r15
1474         jz      .Lstore_base2_26_avx2
1475
1476         vmovd   %rax#d,%x#$H0
1477         vmovd   %rdx#d,%x#$H1
1478         vmovd   $h0#d,%x#$H2
1479         vmovd   $h1#d,%x#$H3
1480         vmovd   $h2#d,%x#$H4
1481         jmp     .Lproceed_avx2
1482
1483 .align  32
1484 .Lstore_base2_64_avx2:
1485         mov     $h0,0($ctx)
1486         mov     $h1,8($ctx)
1487         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1488         jmp     .Ldone_avx2
1489
1490 .align  16
1491 .Lstore_base2_26_avx2:
1492         mov     %rax#d,0($ctx)          # store hash value base 2^26
1493         mov     %rdx#d,4($ctx)
1494         mov     $h0#d,8($ctx)
1495         mov     $h1#d,12($ctx)
1496         mov     $h2#d,16($ctx)
1497 .align  16
1498 .Ldone_avx2:
1499         mov     0(%rsp),%r15
1500         mov     8(%rsp),%r14
1501         mov     16(%rsp),%r13
1502         mov     24(%rsp),%r12
1503         mov     32(%rsp),%rbp
1504         mov     40(%rsp),%rbx
1505         lea     48(%rsp),%rsp
1506 .Lno_data_avx2:
1507 .Lblocks_avx2_epilogue:
1508         ret
1509
1510 .align  32
1511 .Lbase2_64_avx2:
1512         push    %rbx
1513         push    %rbp
1514         push    %r12
1515         push    %r13
1516         push    %r14
1517         push    %r15
1518 .Lbase2_64_avx2_body:
1519
1520         mov     $len,%r15               # reassign $len
1521
1522         mov     24($ctx),$r0            # load r
1523         mov     32($ctx),$s1
1524
1525         mov     0($ctx),$h0             # load hash value
1526         mov     8($ctx),$h1
1527         mov     16($ctx),$h2#d
1528
1529         mov     $s1,$r1
1530         mov     $s1,%rax
1531         shr     \$2,$s1
1532         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1533
1534         test    \$63,$len
1535         jz      .Linit_avx2
1536
1537 .Lbase2_64_pre_avx2:
1538         add     0($inp),$h0             # accumulate input
1539         adc     8($inp),$h1
1540         lea     16($inp),$inp
1541         adc     $padbit,$h2
1542         sub     \$16,%r15
1543
1544         call    __poly1305_block
1545         mov     $r1,%rax
1546
1547         test    \$63,%r15
1548         jnz     .Lbase2_64_pre_avx2
1549
1550 .Linit_avx2:
1551         ################################# base 2^64 -> base 2^26
1552         mov     $h0,%rax
1553         mov     $h0,%rdx
1554         shr     \$52,$h0
1555         mov     $h1,$d1
1556         mov     $h1,$d2
1557         shr     \$26,%rdx
1558         and     \$0x3ffffff,%rax        # h[0]
1559         shl     \$12,$d1
1560         and     \$0x3ffffff,%rdx        # h[1]
1561         shr     \$14,$h1
1562         or      $d1,$h0
1563         shl     \$24,$h2
1564         and     \$0x3ffffff,$h0         # h[2]
1565         shr     \$40,$d2
1566         and     \$0x3ffffff,$h1         # h[3]
1567         or      $d2,$h2                 # h[4]
1568
1569         vmovd   %rax#d,%x#$H0
1570         vmovd   %rdx#d,%x#$H1
1571         vmovd   $h0#d,%x#$H2
1572         vmovd   $h1#d,%x#$H3
1573         vmovd   $h2#d,%x#$H4
1574         movl    \$1,20($ctx)            # set is_base2_26
1575
1576         call    __poly1305_init_avx
1577
1578 .Lproceed_avx2:
1579         mov     %r15,$len                       # restore $len
1580         mov     OPENSSL_ia32cap_P+8(%rip),%r10d
1581         mov     \$`(1<<31|1<<30|1<<16)`,%r11d
1582
1583         mov     0(%rsp),%r15
1584         mov     8(%rsp),%r14
1585         mov     16(%rsp),%r13
1586         mov     24(%rsp),%r12
1587         mov     32(%rsp),%rbp
1588         mov     40(%rsp),%rbx
1589         lea     48(%rsp),%rax
1590         lea     48(%rsp),%rsp
1591 .Lbase2_64_avx2_epilogue:
1592         jmp     .Ldo_avx2
1593
1594 .align  32
1595 .Leven_avx2:
1596         mov             OPENSSL_ia32cap_P+8(%rip),%r10d
1597         mov             \$`(1<<31|1<<30|1<<16)`,%r11d
1598         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1599         vmovd           4*1($ctx),%x#$H1
1600         vmovd           4*2($ctx),%x#$H2
1601         vmovd           4*3($ctx),%x#$H3
1602         vmovd           4*4($ctx),%x#$H4
1603
1604 .Ldo_avx2:
1605 ___
1606 $code.=<<___            if ($avx>2);
1607         cmp             \$512,$len
1608         jb              .Lskip_avx512
1609         and             %r11d,%r10d
1610         cmp             %r11d,%r10d             # check for AVX512F+BW+VL
1611         je              .Lblocks_avx512
1612 .Lskip_avx512:
1613 ___
1614 $code.=<<___    if (!$win64);
1615         lea             -8(%rsp),%r11
1616         sub             \$0x128,%rsp
1617 ___
1618 $code.=<<___    if ($win64);
1619         lea             -0xf8(%rsp),%r11
1620         sub             \$0x1c8,%rsp
1621         vmovdqa         %xmm6,0x50(%r11)
1622         vmovdqa         %xmm7,0x60(%r11)
1623         vmovdqa         %xmm8,0x70(%r11)
1624         vmovdqa         %xmm9,0x80(%r11)
1625         vmovdqa         %xmm10,0x90(%r11)
1626         vmovdqa         %xmm11,0xa0(%r11)
1627         vmovdqa         %xmm12,0xb0(%r11)
1628         vmovdqa         %xmm13,0xc0(%r11)
1629         vmovdqa         %xmm14,0xd0(%r11)
1630         vmovdqa         %xmm15,0xe0(%r11)
1631 .Ldo_avx2_body:
1632 ___
1633 $code.=<<___;
1634         lea             .Lconst(%rip),%rcx
1635         lea             48+64($ctx),$ctx        # size optimization
1636         vmovdqa         96(%rcx),$T0            # .Lpermd_avx2
1637
1638         # expand and copy pre-calculated table to stack
1639         vmovdqu         `16*0-64`($ctx),%x#$T2
1640         and             \$-512,%rsp
1641         vmovdqu         `16*1-64`($ctx),%x#$T3
1642         vmovdqu         `16*2-64`($ctx),%x#$T4
1643         vmovdqu         `16*3-64`($ctx),%x#$D0
1644         vmovdqu         `16*4-64`($ctx),%x#$D1
1645         vmovdqu         `16*5-64`($ctx),%x#$D2
1646         lea             0x90(%rsp),%rax         # size optimization
1647         vmovdqu         `16*6-64`($ctx),%x#$D3
1648         vpermd          $T2,$T0,$T2             # 00003412 -> 14243444
1649         vmovdqu         `16*7-64`($ctx),%x#$D4
1650         vpermd          $T3,$T0,$T3
1651         vmovdqu         `16*8-64`($ctx),%x#$MASK
1652         vpermd          $T4,$T0,$T4
1653         vmovdqa         $T2,0x00(%rsp)
1654         vpermd          $D0,$T0,$D0
1655         vmovdqa         $T3,0x20-0x90(%rax)
1656         vpermd          $D1,$T0,$D1
1657         vmovdqa         $T4,0x40-0x90(%rax)
1658         vpermd          $D2,$T0,$D2
1659         vmovdqa         $D0,0x60-0x90(%rax)
1660         vpermd          $D3,$T0,$D3
1661         vmovdqa         $D1,0x80-0x90(%rax)
1662         vpermd          $D4,$T0,$D4
1663         vmovdqa         $D2,0xa0-0x90(%rax)
1664         vpermd          $MASK,$T0,$MASK
1665         vmovdqa         $D3,0xc0-0x90(%rax)
1666         vmovdqa         $D4,0xe0-0x90(%rax)
1667         vmovdqa         $MASK,0x100-0x90(%rax)
1668         vmovdqa         64(%rcx),$MASK          # .Lmask26
1669
1670         ################################################################
1671         # load input
1672         vmovdqu         16*0($inp),%x#$T0
1673         vmovdqu         16*1($inp),%x#$T1
1674         vinserti128     \$1,16*2($inp),$T0,$T0
1675         vinserti128     \$1,16*3($inp),$T1,$T1
1676         lea             16*4($inp),$inp
1677
1678         vpsrldq         \$6,$T0,$T2             # splat input
1679         vpsrldq         \$6,$T1,$T3
1680         vpunpckhqdq     $T1,$T0,$T4             # 4
1681         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1682         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1683
1684         vpsrlq          \$30,$T2,$T3
1685         vpsrlq          \$4,$T2,$T2
1686         vpsrlq          \$26,$T0,$T1
1687         vpsrlq          \$40,$T4,$T4            # 4
1688         vpand           $MASK,$T2,$T2           # 2
1689         vpand           $MASK,$T0,$T0           # 0
1690         vpand           $MASK,$T1,$T1           # 1
1691         vpand           $MASK,$T3,$T3           # 3
1692         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1693
1694         vpaddq          $H2,$T2,$H2             # accumulate input
1695         sub             \$64,$len
1696         jz              .Ltail_avx2
1697         jmp             .Loop_avx2
1698
1699 .align  32
1700 .Loop_avx2:
1701         ################################################################
1702         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1703         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1704         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1705         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1706         #   \________/\__________/
1707         ################################################################
1708         #vpaddq         $H2,$T2,$H2             # accumulate input
1709         vpaddq          $H0,$T0,$H0
1710         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1711         vpaddq          $H1,$T1,$H1
1712         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1713         vpaddq          $H3,$T3,$H3
1714         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1715         vpaddq          $H4,$T4,$H4
1716         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1717         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1718
1719         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1720         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1721         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1722         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1723         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1724         #
1725         # however, as h2 is "chronologically" the first one available, pull the
1726         # corresponding operations up, so it's
1727         #
1728         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1729         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1730         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1731         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1732         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
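        #
        # the 5*r factors come from 2^130 = 5 (mod 2^130-5): any product term
        # landing at or above limb 5 wraps around multiplied by 5, which is why
        # the precomputed table stores s_i = 5*r_i right next to r_i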
1733
1734         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1735         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1736         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1737         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1738         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1739
1740         vpmuludq        $H0,$T1,$T4             # h0*r1
1741         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1742         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1743         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1744         vpmuludq        $H3,$T1,$T4             # h3*r1
1745         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1746         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1747         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1748          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1749
1750         vpmuludq        $H0,$T0,$T4             # h0*r0
1751         vpmuludq        $H1,$T0,$H2             # h1*r0
1752         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1753         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1754         vpmuludq        $H3,$T0,$T4             # h3*r0
1755         vpmuludq        $H4,$T0,$H2             # h4*r0
1756          vmovdqu        16*0($inp),%x#$T0       # load input
1757         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1758         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1759          vinserti128    \$1,16*2($inp),$T0,$T0
1760
1761         vpmuludq        $H3,$T1,$T4             # h3*s2
1762         vpmuludq        $H4,$T1,$H2             # h4*s2
1763          vmovdqu        16*1($inp),%x#$T1
1764         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1765         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1766          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1767         vpmuludq        $H1,$T2,$T4             # h1*r2
1768         vpmuludq        $H0,$T2,$T2             # h0*r2
1769         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1770         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1771          vinserti128    \$1,16*3($inp),$T1,$T1
1772          lea            16*4($inp),$inp
1773
1774         vpmuludq        $H1,$H2,$T4             # h1*r3
1775         vpmuludq        $H0,$H2,$H2             # h0*r3
1776          vpsrldq        \$6,$T0,$T2             # splat input
1777         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1778         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1779         vpmuludq        $H3,$T3,$T4             # h3*s3
1780         vpmuludq        $H4,$T3,$H2             # h4*s3
1781          vpsrldq        \$6,$T1,$T3
1782         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1783         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1784          vpunpckhqdq    $T1,$T0,$T4             # 4
1785
1786         vpmuludq        $H3,$S4,$H3             # h3*s4
1787         vpmuludq        $H4,$S4,$H4             # h4*s4
1788          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1789         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1790         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1791          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1792         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1793         vpmuludq        $H1,$S4,$H0             # h1*s4
1794         vmovdqa         64(%rcx),$MASK          # .Lmask26
1795         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1796         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1797
1798         ################################################################
1799         # lazy reduction (interleaved with tail of input splat)
1800
1801         vpsrlq          \$26,$H3,$D3
1802         vpand           $MASK,$H3,$H3
1803         vpaddq          $D3,$H4,$H4             # h3 -> h4
1804
1805         vpsrlq          \$26,$H0,$D0
1806         vpand           $MASK,$H0,$H0
1807         vpaddq          $D0,$D1,$H1             # h0 -> h1
1808
1809         vpsrlq          \$26,$H4,$D4
1810         vpand           $MASK,$H4,$H4
1811
1812          vpsrlq         \$4,$T3,$T2
1813
1814         vpsrlq          \$26,$H1,$D1
1815         vpand           $MASK,$H1,$H1
1816         vpaddq          $D1,$H2,$H2             # h1 -> h2
1817
1818         vpaddq          $D4,$H0,$H0
1819         vpsllq          \$2,$D4,$D4
1820         vpaddq          $D4,$H0,$H0             # h4 -> h0
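        # (carry out of h4 wraps around to h0 multiplied by 5, computed as c + (c<<2))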
1821
1822          vpand          $MASK,$T2,$T2           # 2
1823          vpsrlq         \$26,$T0,$T1
1824
1825         vpsrlq          \$26,$H2,$D2
1826         vpand           $MASK,$H2,$H2
1827         vpaddq          $D2,$H3,$H3             # h2 -> h3
1828
1829          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1830          vpsrlq         \$30,$T3,$T3
1831
1832         vpsrlq          \$26,$H0,$D0
1833         vpand           $MASK,$H0,$H0
1834         vpaddq          $D0,$H1,$H1             # h0 -> h1
1835
1836          vpsrlq         \$40,$T4,$T4            # 4
1837
1838         vpsrlq          \$26,$H3,$D3
1839         vpand           $MASK,$H3,$H3
1840         vpaddq          $D3,$H4,$H4             # h3 -> h4
1841
1842          vpand          $MASK,$T0,$T0           # 0
1843          vpand          $MASK,$T1,$T1           # 1
1844          vpand          $MASK,$T3,$T3           # 3
1845          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1846
1847         sub             \$64,$len
1848         jnz             .Loop_avx2
1849
1850         .byte           0x66,0x90
1851 .Ltail_avx2:
1852         ################################################################
1853         # while the above multiplications were by r^4 in all lanes, in the last
1854         # iteration we multiply the least significant lane by r^4 and the most
1855         # significant one by r, so this is a copy of the above except that
1856         # references to the precomputed table are displaced by 4...
1857
1858         #vpaddq         $H2,$T2,$H2             # accumulate input
1859         vpaddq          $H0,$T0,$H0
1860         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1861         vpaddq          $H1,$T1,$H1
1862         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1863         vpaddq          $H3,$T3,$H3
1864         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1865         vpaddq          $H4,$T4,$H4
1866         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1867         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1868
1869         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1870         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1871         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1872         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1873         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1874
1875         vpmuludq        $H0,$T1,$T4             # h0*r1
1876         vpmuludq        $H1,$T1,$H2             # h1*r1
1877         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1878         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1879         vpmuludq        $H3,$T1,$T4             # h3*r1
1880         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1881         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1882         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1883
1884         vpmuludq        $H0,$T0,$T4             # h0*r0
1885         vpmuludq        $H1,$T0,$H2             # h1*r0
1886         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1887          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1888         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1889         vpmuludq        $H3,$T0,$T4             # h3*r0
1890         vpmuludq        $H4,$T0,$H2             # h4*r0
1891         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1892         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1893
1894         vpmuludq        $H3,$T1,$T4             # h3*s2
1895         vpmuludq        $H4,$T1,$H2             # h4*s2
1896         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1897         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1898          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1899         vpmuludq        $H1,$T2,$T4             # h1*r2
1900         vpmuludq        $H0,$T2,$T2             # h0*r2
1901         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1902         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1903
1904         vpmuludq        $H1,$H2,$T4             # h1*r3
1905         vpmuludq        $H0,$H2,$H2             # h0*r3
1906         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1907         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1908         vpmuludq        $H3,$T3,$T4             # h3*s3
1909         vpmuludq        $H4,$T3,$H2             # h4*s3
1910         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1911         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1912
1913         vpmuludq        $H3,$S4,$H3             # h3*s4
1914         vpmuludq        $H4,$S4,$H4             # h4*s4
1915         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1916         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1917         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
1918         vpmuludq        $H1,$S4,$H0             # h1*s4
1919         vmovdqa         64(%rcx),$MASK          # .Lmask26
1920         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1921         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1922
1923         ################################################################
1924         # horizontal addition
1925
1926         vpsrldq         \$8,$D1,$T1
1927         vpsrldq         \$8,$H2,$T2
1928         vpsrldq         \$8,$H3,$T3
1929         vpsrldq         \$8,$H4,$T4
1930         vpsrldq         \$8,$H0,$T0
1931         vpaddq          $T1,$D1,$D1
1932         vpaddq          $T2,$H2,$H2
1933         vpaddq          $T3,$H3,$H3
1934         vpaddq          $T4,$H4,$H4
1935         vpaddq          $T0,$H0,$H0
1936
1937         vpermq          \$0x2,$H3,$T3
1938         vpermq          \$0x2,$H4,$T4
1939         vpermq          \$0x2,$H0,$T0
1940         vpermq          \$0x2,$D1,$T1
1941         vpermq          \$0x2,$H2,$T2
1942         vpaddq          $T3,$H3,$H3
1943         vpaddq          $T4,$H4,$H4
1944         vpaddq          $T0,$H0,$H0
1945         vpaddq          $T1,$D1,$D1
1946         vpaddq          $T2,$H2,$H2
1947
1948         ################################################################
1949         # lazy reduction
1950
1951         vpsrlq          \$26,$H3,$D3
1952         vpand           $MASK,$H3,$H3
1953         vpaddq          $D3,$H4,$H4             # h3 -> h4
1954
1955         vpsrlq          \$26,$H0,$D0
1956         vpand           $MASK,$H0,$H0
1957         vpaddq          $D0,$D1,$H1             # h0 -> h1
1958
1959         vpsrlq          \$26,$H4,$D4
1960         vpand           $MASK,$H4,$H4
1961
1962         vpsrlq          \$26,$H1,$D1
1963         vpand           $MASK,$H1,$H1
1964         vpaddq          $D1,$H2,$H2             # h1 -> h2
1965
1966         vpaddq          $D4,$H0,$H0
1967         vpsllq          \$2,$D4,$D4
1968         vpaddq          $D4,$H0,$H0             # h4 -> h0
1969
1970         vpsrlq          \$26,$H2,$D2
1971         vpand           $MASK,$H2,$H2
1972         vpaddq          $D2,$H3,$H3             # h2 -> h3
1973
1974         vpsrlq          \$26,$H0,$D0
1975         vpand           $MASK,$H0,$H0
1976         vpaddq          $D0,$H1,$H1             # h0 -> h1
1977
1978         vpsrlq          \$26,$H3,$D3
1979         vpand           $MASK,$H3,$H3
1980         vpaddq          $D3,$H4,$H4             # h3 -> h4
1981
1982         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
1983         vmovd           %x#$H1,`4*1-48-64`($ctx)
1984         vmovd           %x#$H2,`4*2-48-64`($ctx)
1985         vmovd           %x#$H3,`4*3-48-64`($ctx)
1986         vmovd           %x#$H4,`4*4-48-64`($ctx)
1987 ___
1988 $code.=<<___    if ($win64);
1989         vmovdqa         0x50(%r11),%xmm6
1990         vmovdqa         0x60(%r11),%xmm7
1991         vmovdqa         0x70(%r11),%xmm8
1992         vmovdqa         0x80(%r11),%xmm9
1993         vmovdqa         0x90(%r11),%xmm10
1994         vmovdqa         0xa0(%r11),%xmm11
1995         vmovdqa         0xb0(%r11),%xmm12
1996         vmovdqa         0xc0(%r11),%xmm13
1997         vmovdqa         0xd0(%r11),%xmm14
1998         vmovdqa         0xe0(%r11),%xmm15
1999         lea             0xf8(%r11),%rsp
2000 .Ldo_avx2_epilogue:
2001 ___
2002 $code.=<<___    if (!$win64);
2003         lea             8(%r11),%rsp
2004 ___
2005 $code.=<<___;
2006         vzeroupper
2007         ret
2008 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
2009 ___
2010 #######################################################################
2011 if ($avx>2) {
2012 # On entry we have input length divisible by 64. But since the inner loop
2013 # processes 128 bytes per iteration, cases when the length is not divisible
2014 # by 128 are handled by passing the tail 64 bytes to .Ltail_avx2. For this
2015 # reason the stack layout is kept identical to poly1305_blocks_avx2. If not
2016 # for this tail, we wouldn't even have to allocate a stack frame...
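# E.g. for length 192 the 128-byte loop is skipped, the first 128 bytes go
# through .Ltail_avx512, and the remaining 64 bytes are then handed over to
# .Ltail_avx2, which expects that very frame to be in place.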
2017
2018 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
2019 my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
2020 my $PADBIT="%zmm30";
2021 my $GATHER="%ymm31";
2022
2023 $code.=<<___;
2024 .type   poly1305_blocks_avx512,\@function,4
2025 .align  32
2026 poly1305_blocks_avx512:
2027 .Lblocks_avx512:
2028         vzeroupper
2029 ___
2030 $code.=<<___    if (!$win64);
2031         lea             -8(%rsp),%r11
2032         sub             \$0x128,%rsp
2033 ___
2034 $code.=<<___    if ($win64);
2035         lea             -0xf8(%rsp),%r11
2036         sub             \$0x1c8,%rsp
2037         vmovdqa         %xmm6,0x50(%r11)
2038         vmovdqa         %xmm7,0x60(%r11)
2039         vmovdqa         %xmm8,0x70(%r11)
2040         vmovdqa         %xmm9,0x80(%r11)
2041         vmovdqa         %xmm10,0x90(%r11)
2042         vmovdqa         %xmm11,0xa0(%r11)
2043         vmovdqa         %xmm12,0xb0(%r11)
2044         vmovdqa         %xmm13,0xc0(%r11)
2045         vmovdqa         %xmm14,0xd0(%r11)
2046         vmovdqa         %xmm15,0xe0(%r11)
2047 .Ldo_avx512_body:
2048 ___
2049 $code.=<<___;
2050         lea             .Lconst(%rip),%rcx
2051         lea             48+64($ctx),$ctx        # size optimization
2052         vmovdqa         96(%rcx),$T2            # .Lpermd_avx2
2053
2054         # expand pre-calculated table
2055         vmovdqu32       `16*0-64`($ctx),%x#$R0
2056         and             \$-512,%rsp
2057         vmovdqu32       `16*1-64`($ctx),%x#$R1
2058         vmovdqu32       `16*2-64`($ctx),%x#$S1
2059         vmovdqu32       `16*3-64`($ctx),%x#$R2
2060         vmovdqu32       `16*4-64`($ctx),%x#$S2
2061         vmovdqu32       `16*5-64`($ctx),%x#$R3
2062         vmovdqu32       `16*6-64`($ctx),%x#$S3
2063         vmovdqu32       `16*7-64`($ctx),%x#$R4
2064         vmovdqu32       `16*8-64`($ctx),%x#$S4
2065         vpermd          $R0,$T2,$R0             # 00003412 -> 14243444
2066         vmovdqa64       64(%rcx),$MASK          # .Lmask26
2067         vpermd          $R1,$T2,$R1
2068         vpermd          $S1,$T2,$S1
2069         vpermd          $R2,$T2,$R2
2070         vmovdqa32       $R0,0x00(%rsp)          # save in case $len%128 != 0
2071          vpsrlq         \$32,$R0,$T0            # 14243444 -> 01020304
2072         vpermd          $S2,$T2,$S2
2073         vmovdqa32       $R1,0x20(%rsp)
2074          vpsrlq         \$32,$R1,$T1
2075         vpermd          $R3,$T2,$R3
2076         vmovdqa32       $S1,0x40(%rsp)
2077         vpermd          $S3,$T2,$S3
2078         vpermd          $R4,$T2,$R4
2079         vmovdqa32       $R2,0x60(%rsp)
2080         vpermd          $S4,$T2,$S4
2081         vmovdqa32       $S2,0x80(%rsp)
2082         vmovdqa32       $R3,0xa0(%rsp)
2083         vmovdqa32       $S3,0xc0(%rsp)
2084         vmovdqa32       $R4,0xe0(%rsp)
2085         vmovdqa32       $S4,0x100(%rsp)
2086
2087         ################################################################
2088         # calculate 5th through 8th powers of the key
2089         #
2090         # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2091         # d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2092         # d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
2093         # d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
2094         # d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0
2095
2096         vpmuludq        $T0,$R0,$D0             # d0 = r0'*r0
2097         vpmuludq        $T0,$R1,$D1             # d1 = r0'*r1
2098         vpmuludq        $T0,$R2,$D2             # d2 = r0'*r2
2099         vpmuludq        $T0,$R3,$D3             # d3 = r0'*r3
2100         vpmuludq        $T0,$R4,$D4             # d4 = r0'*r4
2101          vpsrlq         \$32,$R2,$T2
2102
2103         vpmuludq        $T1,$S4,$M0
2104         vpmuludq        $T1,$R0,$M1
2105         vpmuludq        $T1,$R1,$M2
2106         vpmuludq        $T1,$R2,$M3
2107         vpmuludq        $T1,$R3,$M4
2108          vpsrlq         \$32,$R3,$T3
2109         vpaddq          $M0,$D0,$D0             # d0 += r1'*5*r4
2110         vpaddq          $M1,$D1,$D1             # d1 += r1'*r0
2111         vpaddq          $M2,$D2,$D2             # d2 += r1'*r1
2112         vpaddq          $M3,$D3,$D3             # d3 += r1'*r2
2113         vpaddq          $M4,$D4,$D4             # d4 += r1'*r3
2114
2115         vpmuludq        $T2,$S3,$M0
2116         vpmuludq        $T2,$S4,$M1
2117         vpmuludq        $T2,$R1,$M3
2118         vpmuludq        $T2,$R2,$M4
2119         vpmuludq        $T2,$R0,$M2
2120          vpsrlq         \$32,$R4,$T4
2121         vpaddq          $M0,$D0,$D0             # d0 += r2'*5*r3
2122         vpaddq          $M1,$D1,$D1             # d1 += r2'*5*r4
2123         vpaddq          $M3,$D3,$D3             # d3 += r2'*r1
2124         vpaddq          $M4,$D4,$D4             # d4 += r2'*r2
2125         vpaddq          $M2,$D2,$D2             # d2 += r2'*r0
2126
2127         vpmuludq        $T3,$S2,$M0
2128         vpmuludq        $T3,$R0,$M3
2129         vpmuludq        $T3,$R1,$M4
2130         vpmuludq        $T3,$S3,$M1
2131         vpmuludq        $T3,$S4,$M2
2132         vpaddq          $M0,$D0,$D0             # d0 += r3'*5*r2
2133         vpaddq          $M3,$D3,$D3             # d3 += r3'*r0
2134         vpaddq          $M4,$D4,$D4             # d4 += r3'*r1
2135         vpaddq          $M1,$D1,$D1             # d1 += r3'*5*r3
2136         vpaddq          $M2,$D2,$D2             # d2 += r3'*5*r4
2137
2138         vpmuludq        $T4,$S4,$M3
2139         vpmuludq        $T4,$R0,$M4
2140         vpmuludq        $T4,$S1,$M0
2141         vpmuludq        $T4,$S2,$M1
2142         vpmuludq        $T4,$S3,$M2
2143         vpaddq          $M3,$D3,$D3             # d3 += r4'*5*r4
2144         vpaddq          $M4,$D4,$D4             # d4 += r4'*r0
2145         vpaddq          $M0,$D0,$D0             # d0 += r4'*5*r1
2146         vpaddq          $M1,$D1,$D1             # d1 += r4'*5*r2
2147         vpaddq          $M2,$D2,$D2             # d2 += r4'*5*r3
2148
2149         ################################################################
2150         # load input
2151         vmovdqu64       16*0($inp),%z#$T3
2152         vmovdqu64       16*4($inp),%z#$T4
2153         lea             16*8($inp),$inp
2154
2155         ################################################################
2156         # lazy reduction
2157
2158         vpsrlq          \$26,$D3,$M3
2159         vpandq          $MASK,$D3,$D3
2160         vpaddq          $M3,$D4,$D4             # d3 -> d4
2161
2162         vpsrlq          \$26,$D0,$M0
2163         vpandq          $MASK,$D0,$D0
2164         vpaddq          $M0,$D1,$D1             # d0 -> d1
2165
2166         vpsrlq          \$26,$D4,$M4
2167         vpandq          $MASK,$D4,$D4
2168
2169         vpsrlq          \$26,$D1,$M1
2170         vpandq          $MASK,$D1,$D1
2171         vpaddq          $M1,$D2,$D2             # d1 -> d2
2172
2173         vpaddq          $M4,$D0,$D0
2174         vpsllq          \$2,$M4,$M4
2175         vpaddq          $M4,$D0,$D0             # d4 -> d0
2176
2177         vpsrlq          \$26,$D2,$M2
2178         vpandq          $MASK,$D2,$D2
2179         vpaddq          $M2,$D3,$D3             # d2 -> d3
2180
2181         vpsrlq          \$26,$D0,$M0
2182         vpandq          $MASK,$D0,$D0
2183         vpaddq          $M0,$D1,$D1             # d0 -> d1
2184
2185         vpsrlq          \$26,$D3,$M3
2186         vpandq          $MASK,$D3,$D3
2187         vpaddq          $M3,$D4,$D4             # d3 -> d4
2188
2189 ___
2190 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));            # switch to %zmm domain
2191 map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
2192 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2193 map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
2194 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2195 map(s/%y/%z/,($MASK));
2196 $code.=<<___;
2197         ################################################################
2198         # at this point we have 14243444 in $R0-$S4 and 05060708 in
2199         # $D0-$D4, ...
2200
2201         vpunpcklqdq     $T4,$T3,$T0     # transpose input
2202         vpunpckhqdq     $T4,$T3,$T4
2203
2204         # ... since input 64-bit lanes are ordered as 73625140, we could
2205         # "vperm" it to 76543210 (here and in each loop iteration), *or*
2206         # we could just flow along, hence the goal for $R0-$S4 is
2207         # 1858286838784888 ...
2208
2209         mov             \$0b0110011001100110,%eax
2210         mov             \$0b1100110011001100,%r8d
2211         mov             \$0b0101010101010101,%r9d
2212         kmovw           %eax,%k1
2213         kmovw           %r8d,%k2
2214         kmovw           %r9d,%k3
2215
2216         vpbroadcastq    %x#$D0,$M0      # 0808080808080808
2217         vpbroadcastq    %x#$D1,$M1
2218         vpbroadcastq    %x#$D2,$M2
2219         vpbroadcastq    %x#$D3,$M3
2220         vpbroadcastq    %x#$D4,$M4
2221
2222         vpexpandd       $D0,${D0}{%k1}  # 05060708 -> -05--06--07--08-
2223         vpexpandd       $D1,${D1}{%k1}
2224         vpexpandd       $D2,${D2}{%k1}
2225         vpexpandd       $D3,${D3}{%k1}
2226         vpexpandd       $D4,${D4}{%k1}
2227
2228         vpexpandd       $R0,${D0}{%k2}  # -05--06--07--08- -> 145-246-347-448-
2229         vpexpandd       $R1,${D1}{%k2}
2230         vpexpandd       $R2,${D2}{%k2}
2231         vpexpandd       $R3,${D3}{%k2}
2232         vpexpandd       $R4,${D4}{%k2}
2233
2234         vpblendmd       $M0,$D0,${R0}{%k3}      # 1858286838784888
2235         vpblendmd       $M1,$D1,${R1}{%k3}
2236         vpblendmd       $M2,$D2,${R2}{%k3}
2237         vpblendmd       $M3,$D3,${R3}{%k3}
2238         vpblendmd       $M4,$D4,${R4}{%k3}
2239
2240         vpslld          \$2,$R1,$S1             # *5
2241         vpslld          \$2,$R2,$S2
2242         vpslld          \$2,$R3,$S3
2243         vpslld          \$2,$R4,$S4
2244         vpaddd          $R1,$S1,$S1
2245         vpaddd          $R2,$S2,$S2
2246         vpaddd          $R3,$S3,$S3
2247         vpaddd          $R4,$S4,$S4
2248
2249         vpbroadcastq    %x#$MASK,$MASK
2250         vpbroadcastq    32(%rcx),$PADBIT        # .L129
2251
2252         vpsrlq          \$52,$T0,$T2            # splat input
2253         vpsllq          \$12,$T4,$T3
2254         vporq           $T3,$T2,$T2
2255         vpsrlq          \$26,$T0,$T1
2256         vpsrlq          \$14,$T4,$T3
2257         vpsrlq          \$40,$T4,$T4            # 4
2258         vpandq          $MASK,$T2,$T2           # 2
2259         vpandq          $MASK,$T0,$T0           # 0
2260         vpandq          $MASK,$T1,$T1           # 1
2261         vpandq          $MASK,$T3,$T3           # 3
2262         #vporq          $PADBIT,$T4,$T4         # padbit, yes, always
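        # per 64-bit lane pair (lo,hi) of a 16-byte block this is the usual
        # 5x26-bit split: t0 = lo mod 2^26, t1 = (lo>>26) mod 2^26,
        # t2 = ((lo>>52)|(hi<<12)) mod 2^26, t3 = (hi>>14) mod 2^26,
        # t4 = (hi>>40), to be or-ed with the padbit (bit 128 = 1<<24 in limb 4)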
2263
2264         vpaddq          $H2,$T2,$H2             # accumulate input
2265         mov             \$0x0f,%eax
2266         sub             \$192,$len
2267         jbe             .Ltail_avx512
2268         jmp             .Loop_avx512
2269
2270 .align  32
2271 .Loop_avx512:
2272         ################################################################
2273         # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2274         # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2275         # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2276         # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2277         # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2278         # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2279         # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2280         # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2281         #   \________/\___________/
2282         ################################################################
2283         #vpaddq         $H2,$T2,$H2             # accumulate input
2284
2285         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
2286         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
2287         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
2288         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
2289         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2290         #
2291         # however, as h2 is "chronologically" the first one available, pull the
2292         # corresponding operations up, so it's
2293         #
2294         # d3 = h2*r1   + h0*r3 + h1*r2   + h3*r0 + h4*5*r4
2295         # d4 = h2*r2   + h0*r4 + h1*r3   + h3*r1 + h4*r0
2296         # d0 = h2*5*r3 + h0*r0 + h1*5*r4         + h3*5*r2 + h4*5*r1
2297         # d1 = h2*5*r4 + h0*r1           + h1*r0 + h3*5*r3 + h4*5*r2
2298         # d2 = h2*r0           + h0*r2   + h1*r1 + h3*5*r4 + h4*5*r3
2299
2300         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2301          vpaddq         $H0,$T0,$H0
2302         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2303         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2304         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2305          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2306         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2307          vpaddq         $H1,$T1,$H1             # accumulate input
2308          vpaddq         $H3,$T3,$H3
2309          vpaddq         $H4,$T4,$H4
2310
2311           vmovdqu64     16*0($inp),$T3          # load input
2312           vmovdqu64     16*4($inp),$T4
2313           lea           16*8($inp),$inp
2314         vpmuludq        $H0,$R3,$M3
2315         vpmuludq        $H0,$R4,$M4
2316         vpmuludq        $H0,$R0,$M0
2317         vpmuludq        $H0,$R1,$M1
2318         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2319         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2320         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2321         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2322
2323         vpmuludq        $H1,$R2,$M3
2324         vpmuludq        $H1,$R3,$M4
2325         vpmuludq        $H1,$S4,$M0
2326         vpmuludq        $H0,$R2,$M2
2327         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2328         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2329         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2330         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2331
2332           vpunpcklqdq   $T4,$T3,$T0             # transpose input
2333           vpunpckhqdq   $T4,$T3,$T4
2334
2335         vpmuludq        $H3,$R0,$M3
2336         vpmuludq        $H3,$R1,$M4
2337         vpmuludq        $H1,$R0,$M1
2338         vpmuludq        $H1,$R1,$M2
2339         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2340         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2341         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2342         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2343
2344         vpmuludq        $H4,$S4,$M3
2345         vpmuludq        $H4,$R0,$M4
2346         vpmuludq        $H3,$S2,$M0
2347         vpmuludq        $H3,$S3,$M1
2348         vpaddq          $M3,$D3,$D3             # d3 += h4*s4
2349         vpmuludq        $H3,$S4,$M2
2350         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2351         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2352         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2353         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2354
2355         vpmuludq        $H4,$S1,$M0
2356         vpmuludq        $H4,$S2,$M1
2357         vpmuludq        $H4,$S3,$M2
2358         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2359         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2360         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2361
2362         ################################################################
2363         # lazy reduction (interleaved with input splat)
2364
2365          vpsrlq         \$52,$T0,$T2            # splat input
2366          vpsllq         \$12,$T4,$T3
2367
2368         vpsrlq          \$26,$D3,$H3
2369         vpandq          $MASK,$D3,$D3
2370         vpaddq          $H3,$D4,$H4             # h3 -> h4
2371
2372          vporq          $T3,$T2,$T2
2373
2374         vpsrlq          \$26,$H0,$D0
2375         vpandq          $MASK,$H0,$H0
2376         vpaddq          $D0,$H1,$H1             # h0 -> h1
2377
2378          vpandq         $MASK,$T2,$T2           # 2
2379
2380         vpsrlq          \$26,$H4,$D4
2381         vpandq          $MASK,$H4,$H4
2382
2383         vpsrlq          \$26,$H1,$D1
2384         vpandq          $MASK,$H1,$H1
2385         vpaddq          $D1,$H2,$H2             # h1 -> h2
2386
2387         vpaddq          $D4,$H0,$H0
2388         vpsllq          \$2,$D4,$D4
2389         vpaddq          $D4,$H0,$H0             # h4 -> h0
2390
2391          vpaddq         $T2,$H2,$H2             # modulo-scheduled
2392          vpsrlq         \$26,$T0,$T1
2393
2394         vpsrlq          \$26,$H2,$D2
2395         vpandq          $MASK,$H2,$H2
2396         vpaddq          $D2,$D3,$H3             # h2 -> h3
2397
2398          vpsrlq         \$14,$T4,$T3
2399
2400         vpsrlq          \$26,$H0,$D0
2401         vpandq          $MASK,$H0,$H0
2402         vpaddq          $D0,$H1,$H1             # h0 -> h1
2403
2404          vpsrlq         \$40,$T4,$T4            # 4
2405
2406         vpsrlq          \$26,$H3,$D3
2407         vpandq          $MASK,$H3,$H3
2408         vpaddq          $D3,$H4,$H4             # h3 -> h4
2409
2410          vpandq         $MASK,$T0,$T0           # 0
2411          vpandq         $MASK,$T1,$T1           # 1
2412          vpandq         $MASK,$T3,$T3           # 3
2413          #vporq         $PADBIT,$T4,$T4         # padbit, yes, always
2414
2415         sub             \$128,$len
2416         ja              .Loop_avx512
2417
2418 .Ltail_avx512:
2419         ################################################################
2420         # while the above multiplications were by r^8 in all lanes, in the last
2421         # iteration we multiply the least significant lane by r^8 and the most
2422         # significant one by r, which is why the table gets shifted...
2423
2424         vpsrlq          \$32,$R0,$R0            # 0105020603070408
2425         vpsrlq          \$32,$R1,$R1
2426         vpsrlq          \$32,$R2,$R2
2427         vpsrlq          \$32,$S3,$S3
2428         vpsrlq          \$32,$S4,$S4
2429         vpsrlq          \$32,$R3,$R3
2430         vpsrlq          \$32,$R4,$R4
2431         vpsrlq          \$32,$S1,$S1
2432         vpsrlq          \$32,$S2,$S2
2433
2434         ################################################################
2435         # load either next or last 64 bytes of input
2436         lea             ($inp,$len),$inp
2437
2438         #vpaddq         $H2,$T2,$H2             # accumulate input
2439         vpaddq          $H0,$T0,$H0
2440
2441         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2442         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2443         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2444         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2445         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2446          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2447          vpaddq         $H1,$T1,$H1             # accumulate input
2448          vpaddq         $H3,$T3,$H3
2449          vpaddq         $H4,$T4,$H4
2450
2451           vmovdqu64     16*0($inp),%x#$T0
2452         vpmuludq        $H0,$R3,$M3
2453         vpmuludq        $H0,$R4,$M4
2454         vpmuludq        $H0,$R0,$M0
2455         vpmuludq        $H0,$R1,$M1
2456         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2457         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2458         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2459         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2460
2461           vmovdqu64     16*1($inp),%x#$T1
2462         vpmuludq        $H1,$R2,$M3
2463         vpmuludq        $H1,$R3,$M4
2464         vpmuludq        $H1,$S4,$M0
2465         vpmuludq        $H0,$R2,$M2
2466         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2467         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2468         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2469         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2470
2471           vinserti64x2  \$1,16*2($inp),$T0,$T0
2472         vpmuludq        $H3,$R0,$M3
2473         vpmuludq        $H3,$R1,$M4
2474         vpmuludq        $H1,$R0,$M1
2475         vpmuludq        $H1,$R1,$M2
2476         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2477         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2478         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2479         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2480
2481           vinserti64x2  \$1,16*3($inp),$T1,$T1
2482         vpmuludq        $H4,$S4,$M3
2483         vpmuludq        $H4,$R0,$M4
2484         vpmuludq        $H3,$S2,$M0
2485         vpmuludq        $H3,$S3,$M1
2486         vpmuludq        $H3,$S4,$M2
2487         vpaddq          $M3,$D3,$H3             # h3 = d3 + h4*s4
2488         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2489         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2490         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2491         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2492
2493         vpmuludq        $H4,$S1,$M0
2494         vpmuludq        $H4,$S2,$M1
2495         vpmuludq        $H4,$S3,$M2
2496         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2497         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2498         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2499
2500         ################################################################
2501         # horizontal addition
2502
2503         mov             \$1,%eax
2504         vpsrldq         \$8,$H3,$D3
2505         vpsrldq         \$8,$D4,$H4
2506         vpsrldq         \$8,$H0,$D0
2507         vpsrldq         \$8,$H1,$D1
2508         vpsrldq         \$8,$H2,$D2
2509         vpaddq          $D3,$H3,$H3
2510         vpaddq          $D4,$H4,$H4
2511         vpaddq          $D0,$H0,$H0
2512         vpaddq          $D1,$H1,$H1
2513         vpaddq          $D2,$H2,$H2
2514
2515         kmovw           %eax,%k3
2516         vpermq          \$0x2,$H3,$D3
2517         vpermq          \$0x2,$H4,$D4
2518         vpermq          \$0x2,$H0,$D0
2519         vpermq          \$0x2,$H1,$D1
2520         vpermq          \$0x2,$H2,$D2
2521         vpaddq          $D3,$H3,$H3
2522         vpaddq          $D4,$H4,$H4
2523         vpaddq          $D0,$H0,$H0
2524         vpaddq          $D1,$H1,$H1
2525         vpaddq          $D2,$H2,$H2
2526
2527         vextracti64x4   \$0x1,$H3,%y#$D3
2528         vextracti64x4   \$0x1,$H4,%y#$D4
2529         vextracti64x4   \$0x1,$H0,%y#$D0
2530         vextracti64x4   \$0x1,$H1,%y#$D1
2531         vextracti64x4   \$0x1,$H2,%y#$D2
2532         vpaddq          $D3,$H3,${H3}{%k3}{z}   # keep single qword in case
2533         vpaddq          $D4,$H4,${H4}{%k3}{z}   # it's passed to .Ltail_avx2
2534         vpaddq          $D0,$H0,${H0}{%k3}{z}
2535         vpaddq          $D1,$H1,${H1}{%k3}{z}
2536         vpaddq          $D2,$H2,${H2}{%k3}{z}
2537 ___
2538 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2539 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2540 $code.=<<___;
2541         ################################################################
2542         # lazy reduction (interleaved with input splat)
2543
2544         vpsrlq          \$26,$H3,$D3
2545         vpandq          $MASK,$H3,$H3
2546          vpsrldq        \$6,$T0,$T2             # splat input
2547          vpsrldq        \$6,$T1,$T3
2548          vpunpckhqdq    $T1,$T0,$T4             # 4
2549         vpaddq          $D3,$H4,$H4             # h3 -> h4
2550
2551         vpsrlq          \$26,$H0,$D0
2552         vpandq          $MASK,$H0,$H0
2553          vpunpcklqdq    $T3,$T2,$T2             # 2:3
2554          vpunpcklqdq    $T1,$T0,$T0             # 0:1
2555         vpaddq          $D0,$H1,$H1             # h0 -> h1
2556
2557         vpsrlq          \$26,$H4,$D4
2558         vpandq          $MASK,$H4,$H4
2559
2560         vpsrlq          \$26,$H1,$D1
2561         vpandq          $MASK,$H1,$H1
2562          vpsrlq         \$30,$T2,$T3
2563          vpsrlq         \$4,$T2,$T2
2564         vpaddq          $D1,$H2,$H2             # h1 -> h2
2565
2566         vpaddq          $D4,$H0,$H0
2567         vpsllq          \$2,$D4,$D4
2568          vpsrlq         \$26,$T0,$T1
2569          vpsrlq         \$40,$T4,$T4            # 4
2570         vpaddq          $D4,$H0,$H0             # h4 -> h0
2571
2572         vpsrlq          \$26,$H2,$D2
2573         vpandq          $MASK,$H2,$H2
2574          vpandq         $MASK,$T2,$T2           # 2
2575          vpandq         $MASK,$T0,$T0           # 0
2576         vpaddq          $D2,$H3,$H3             # h2 -> h3
2577
2578         vpsrlq          \$26,$H0,$D0
2579         vpandq          $MASK,$H0,$H0
2580          vpaddq         $H2,$T2,$H2             # accumulate input for .Ltail_avx2
2581          vpandq         $MASK,$T1,$T1           # 1
2582         vpaddq          $D0,$H1,$H1             # h0 -> h1
2583
2584         vpsrlq          \$26,$H3,$D3
2585         vpandq          $MASK,$H3,$H3
2586          vpandq         $MASK,$T3,$T3           # 3
2587          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2588         vpaddq          $D3,$H4,$H4             # h3 -> h4
2589
2590         lea             0x90(%rsp),%rax         # size optimization for .Ltail_avx2
2591         add             \$64,$len
2592         jnz             .Ltail_avx2
2593
2594         vpsubq          $T2,$H2,$H2             # undo input accumulation
2595         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2596         vmovd           %x#$H1,`4*1-48-64`($ctx)
2597         vmovd           %x#$H2,`4*2-48-64`($ctx)
2598         vmovd           %x#$H3,`4*3-48-64`($ctx)
2599         vmovd           %x#$H4,`4*4-48-64`($ctx)
2600         vzeroall
2601 ___
2602 $code.=<<___    if ($win64);
2603         movdqa          0x50(%r11),%xmm6
2604         movdqa          0x60(%r11),%xmm7
2605         movdqa          0x70(%r11),%xmm8
2606         movdqa          0x80(%r11),%xmm9
2607         movdqa          0x90(%r11),%xmm10
2608         movdqa          0xa0(%r11),%xmm11
2609         movdqa          0xb0(%r11),%xmm12
2610         movdqa          0xc0(%r11),%xmm13
2611         movdqa          0xd0(%r11),%xmm14
2612         movdqa          0xe0(%r11),%xmm15
2613         lea             0xf8(%r11),%rsp
2614 .Ldo_avx512_epilogue:
2615 ___
2616 $code.=<<___    if (!$win64);
2617         lea             8(%r11),%rsp
2618 ___
2619 $code.=<<___;
2620         ret
2621 .size   poly1305_blocks_avx512,.-poly1305_blocks_avx512
2622 ___
2623 }       }
2624 $code.=<<___;
2625 .align  64
2626 .Lconst:
2627 .Lmask24:
2628 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
2629 .L129:
2630 .long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
2631 .Lmask26:
2632 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
2633 .Lpermd_avx2:
2634 .long   2,2,2,3,2,0,2,1
2635 ___
2636 }
2637
2638 $code.=<<___;
2639 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2640 .align  16
2641 ___
2642
2643 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2644 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
2645 if ($win64) {
2646 $rec="%rcx";
2647 $frame="%rdx";
2648 $context="%r8";
2649 $disp="%r9";
2650
2651 $code.=<<___;
2652 .extern __imp_RtlVirtualUnwind
2653 .type   se_handler,\@abi-omnipotent
2654 .align  16
2655 se_handler:
2656         push    %rsi
2657         push    %rdi
2658         push    %rbx
2659         push    %rbp
2660         push    %r12
2661         push    %r13
2662         push    %r14
2663         push    %r15
2664         pushfq
2665         sub     \$64,%rsp
2666
2667         mov     120($context),%rax      # pull context->Rax
2668         mov     248($context),%rbx      # pull context->Rip
2669
2670         mov     8($disp),%rsi           # disp->ImageBase
2671         mov     56($disp),%r11          # disp->HandlerData
2672
2673         mov     0(%r11),%r10d           # HandlerData[0]
2674         lea     (%rsi,%r10),%r10        # prologue label
2675         cmp     %r10,%rbx               # context->Rip<.Lprologue
2676         jb      .Lcommon_seh_tail
2677
2678         mov     152($context),%rax      # pull context->Rsp
2679
2680         mov     4(%r11),%r10d           # HandlerData[1]
2681         lea     (%rsi,%r10),%r10        # epilogue label
2682         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
2683         jae     .Lcommon_seh_tail
2684
2685         lea     48(%rax),%rax
2686
2687         mov     -8(%rax),%rbx
2688         mov     -16(%rax),%rbp
2689         mov     -24(%rax),%r12
2690         mov     -32(%rax),%r13
2691         mov     -40(%rax),%r14
2692         mov     -48(%rax),%r15
2693         mov     %rbx,144($context)      # restore context->Rbx
2694         mov     %rbp,160($context)      # restore context->Rbp
2695         mov     %r12,216($context)      # restore context->R12
2696         mov     %r13,224($context)      # restore context->R13
2697         mov     %r14,232($context)      # restore context->R14
2698         mov     %r15,240($context)      # restore context->R15
2699
2700         jmp     .Lcommon_seh_tail
2701 .size   se_handler,.-se_handler
2702
2703 .type   avx_handler,\@abi-omnipotent
2704 .align  16
2705 avx_handler:
2706         push    %rsi
2707         push    %rdi
2708         push    %rbx
2709         push    %rbp
2710         push    %r12
2711         push    %r13
2712         push    %r14
2713         push    %r15
2714         pushfq
2715         sub     \$64,%rsp
2716
2717         mov     120($context),%rax      # pull context->Rax
2718         mov     248($context),%rbx      # pull context->Rip
2719
2720         mov     8($disp),%rsi           # disp->ImageBase
2721         mov     56($disp),%r11          # disp->HandlerData
2722
2723         mov     0(%r11),%r10d           # HandlerData[0]
2724         lea     (%rsi,%r10),%r10        # prologue label
2725         cmp     %r10,%rbx               # context->Rip<prologue label
2726         jb      .Lcommon_seh_tail
2727
2728         mov     152($context),%rax      # pull context->Rsp
2729
2730         mov     4(%r11),%r10d           # HandlerData[1]
2731         lea     (%rsi,%r10),%r10        # epilogue label
2732         cmp     %r10,%rbx               # context->Rip>=epilogue label
2733         jae     .Lcommon_seh_tail
2734
2735         mov     208($context),%rax      # pull context->R11
2736
2737         lea     0x50(%rax),%rsi
2738         lea     0xf8(%rax),%rax
2739         lea     512($context),%rdi      # &context.Xmm6
2740         mov     \$20,%ecx
2741         .long   0xa548f3fc              # cld; rep movsq
2742
2743 .Lcommon_seh_tail:
2744         mov     8(%rax),%rdi
2745         mov     16(%rax),%rsi
2746         mov     %rax,152($context)      # restore context->Rsp
2747         mov     %rsi,168($context)      # restore context->Rsi
2748         mov     %rdi,176($context)      # restore context->Rdi
2749
2750         mov     40($disp),%rdi          # disp->ContextRecord
2751         mov     $context,%rsi           # context
2752         mov     \$154,%ecx              # sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	avx_handler,.-avx_handler

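# .pdata: one RUNTIME_FUNCTION entry per covered range, i.e. begin RVA,
# end RVA and the RVA of the matching UNWIND_INFO record in .xdata.
# The AVX and AVX2 block functions are split into three ranges because
# each range uses a different stack layout and therefore needs its own
# unwind information.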
.section	.pdata
.align	4
	.rva	.LSEH_begin_poly1305_init
	.rva	.LSEH_end_poly1305_init
	.rva	.LSEH_info_poly1305_init

	.rva	.LSEH_begin_poly1305_blocks
	.rva	.LSEH_end_poly1305_blocks
	.rva	.LSEH_info_poly1305_blocks

	.rva	.LSEH_begin_poly1305_emit
	.rva	.LSEH_end_poly1305_emit
	.rva	.LSEH_info_poly1305_emit
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.Lbase2_64_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1

	.rva	.Lbase2_64_avx
	.rva	.Leven_avx
	.rva	.LSEH_info_poly1305_blocks_avx_2

	.rva	.Leven_avx
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1

	.rva	.Lbase2_64_avx2
	.rva	.Leven_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2

	.rva	.Leven_avx2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_poly1305_blocks_avx512
	.rva	.LSEH_end_poly1305_blocks_avx512
	.rva	.LSEH_info_poly1305_blocks_avx512
___
$code.=<<___;
.section	.xdata
.align	8
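# Each UNWIND_INFO record below is version 1 with UNW_FLAG_EHANDLER set
# (.byte 9,0,0,0: flags<<3|version, no prologue size, no unwind codes,
# no frame register), followed by the handler's RVA and by the pair of
# labels, HandlerData[], that the handler compares context->Rip against.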
.LSEH_info_poly1305_init:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
___
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_poly1305_blocks_avx512:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx512_body,.Ldo_avx512_epilogue		# HandlerData[]
___
}

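# Post-process the generated code: back-quoted expressions are evaluated
# as Perl, "%rXX#d" denotes the 32-bit form of a 64-bit register
# (%rax#d -> %eax, %r8#d -> %r8d), and the "%x#"/"%y#"/"%z#" prefixes
# override the width of a SIMD register name (e.g. "%x#%ymm0" -> "%xmm0").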
foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
	s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;

	print $_,"\n";
}
close STDOUT;