poly1305/asm/poly1305-x86_64.pl: add CFI annotations.
1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # This module implements Poly1305 hash for x86_64.
18 #
19 # March 2015
20 #
21 # Initial release.
22 #
23 # December 2016
24 #
25 # Add AVX512F+VL+BW code path.
26 #
27 # Numbers are cycles per processed byte with poly1305_blocks alone,
28 # measured with rdtsc at fixed clock frequency.
29 #
30 #               IALU/gcc-4.8(*) AVX(**)         AVX2
31 # P4            4.46/+120%      -
32 # Core 2        2.41/+90%       -
33 # Westmere      1.88/+120%      -
34 # Sandy Bridge  1.39/+140%      1.10
35 # Haswell       1.14/+175%      1.11            0.65
36 # Skylake       1.13/+120%      0.96            0.51
37 # Silvermont    2.83/+95%       -
38 # Goldmont      1.70/+180%      -
39 # VIA Nano      1.82/+150%      -
40 # Sledgehammer  1.38/+160%      -
41 # Bulldozer     2.30/+130%      0.97
42 #
43 # (*)   improvement coefficients relative to clang are more modest and
44 #       are ~50% on most processors; in both cases we are comparing to
45 #       __int128 code;
46 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
47 #       it was faster than integer-only code only on older Intel P4 and
48 #       Core processors, by 50-30% (less on newer ones), while being slower
49 #       on contemporary ones, e.g. almost 2x slower on Atom; as the former
50 #       are naturally disappearing, SSE2 is deemed unnecessary;
51
52 $flavour = shift;
53 $output  = shift;
54 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
55
56 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
57
58 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
59 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
60 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
61 die "can't locate x86_64-xlate.pl";
62
63 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
64                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
65         $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26);
66 }
67
68 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
69            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
70         $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12);
71         $avx += 2 if ($1==2.11 && $2>=8);
72 }
73
74 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
75            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
76         $avx = ($1>=10) + ($1>=12);
77 }
78
79 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
80         $avx = ($2>=3.0) + ($2>3.0);
81 }
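
# $avx summarizes assembler capabilities and gates the code paths emitted
# below: >=1 enables the AVX functions, >=2 the AVX2 ones, and >3 the
# AVX512F+VL+BW base 2^44 setup (the "$avx>3" guards); the integer-only
# functions are always emitted as the fallback.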
82
83 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
84 *STDOUT=*OUT;
85
86 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
87 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
88 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
89 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
90
91 sub poly1305_iteration {
92 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
93 # output:       $h0-$h2 *= $r0-$r1
94 $code.=<<___;
95         mulq    $h0                     # h0*r1
96         mov     %rax,$d2
97          mov    $r0,%rax
98         mov     %rdx,$d3
99
100         mulq    $h0                     # h0*r0
101         mov     %rax,$h0                # future $h0
102          mov    $r0,%rax
103         mov     %rdx,$d1
104
105         mulq    $h1                     # h1*r0
106         add     %rax,$d2
107          mov    $s1,%rax
108         adc     %rdx,$d3
109
110         mulq    $h1                     # h1*s1
111          mov    $h2,$h1                 # borrow $h1
112         add     %rax,$h0
113         adc     %rdx,$d1
114
115         imulq   $s1,$h1                 # h2*s1
116         add     $h1,$d2
117          mov    $d1,$h1
118         adc     \$0,$d3
119
120         imulq   $r0,$h2                 # h2*r0
121         add     $d2,$h1
122         mov     \$-4,%rax               # mask value
123         adc     $h2,$d3
124
125         and     $d3,%rax                # last reduction step
126         mov     $d3,$h2
127         shr     \$2,$d3
128         and     \$3,$h2
129         add     $d3,%rax
130         add     %rax,$h0
131         adc     \$0,$h1
132         adc     \$0,$h2
133 ___
134 }
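
# In other words, the iteration computes h0:h1:h2 = (h * r) mod 2^130-5 on
# 64-bit limbs.  The "last reduction step" uses 2^130 = 5 (mod p): the part
# of $d3 above bit 1 satisfies ($d3 & -4)*2^128 = ($d3>>2)*2^130, so it folds
# back to bit 0 as ($d3 & -4) + ($d3>>2) (the masked value built in %rax),
# while $d3 & 3 stays as the new $h2.  A minimal big-integer reference of the
# same step (a sketch with a hypothetical helper, not used by the generator):
#
#       use Math::BigInt;
#       my $P130 = Math::BigInt->new(1)->blsft(130)->bsub(5);  # 2^130 - 5
#       sub iteration_ref {
#               my ($h, $r) = @_;       # Math::BigInt values, $r clamped
#               return $h->copy->bmul($r)->bmod($P130);
#       }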
135
136 ########################################################################
137 # Layout of opaque area is as follows.
138 #
139 #       unsigned __int64 h[3];          # current hash value base 2^64
140 #       unsigned __int64 r[2];          # key value base 2^64
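#
#       The integer-only code below accesses it as h0 = 0($ctx),
#       h1 = 8($ctx), h2 = 16($ctx), r0 = 24($ctx), r1 = 32($ctx);
#       s1 = r1 + (r1>>2) is derived on the fly in poly1305_blocks.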
141
142 $code.=<<___;
143 .text
144
145 .extern OPENSSL_ia32cap_P
146
147 .globl  poly1305_init
148 .hidden poly1305_init
149 .globl  poly1305_blocks
150 .hidden poly1305_blocks
151 .globl  poly1305_emit
152 .hidden poly1305_emit
153
154 .type   poly1305_init,\@function,3
155 .align  32
156 poly1305_init:
157         xor     %rax,%rax
158         mov     %rax,0($ctx)            # initialize hash value
159         mov     %rax,8($ctx)
160         mov     %rax,16($ctx)
161
162         cmp     \$0,$inp
163         je      .Lno_key
164
165         lea     poly1305_blocks(%rip),%r10
166         lea     poly1305_emit(%rip),%r11
167 ___
168 $code.=<<___    if ($avx);
169         mov     OPENSSL_ia32cap_P+4(%rip),%r9
170         lea     poly1305_blocks_avx(%rip),%rax
171         lea     poly1305_emit_avx(%rip),%rcx
172         bt      \$`60-32`,%r9           # AVX?
173         cmovc   %rax,%r10
174         cmovc   %rcx,%r11
175 ___
176 $code.=<<___    if ($avx>1);
177         lea     poly1305_blocks_avx2(%rip),%rax
178         bt      \$`5+32`,%r9            # AVX2?
179         cmovc   %rax,%r10
180 ___
181 $code.=<<___    if ($avx>3);
182         mov     \$`(1<<31|1<<21|1<<16)`,%rax
183         shr     \$32,%r9
184         and     %rax,%r9
185         cmp     %rax,%r9
186         je      .Linit_base2_44
187 ___
188 $code.=<<___;
189         mov     \$0x0ffffffc0fffffff,%rax
190         mov     \$0x0ffffffc0ffffffc,%rcx
191         and     0($inp),%rax
192         and     8($inp),%rcx
193         mov     %rax,24($ctx)
194         mov     %rcx,32($ctx)
195 ___
196 $code.=<<___    if ($flavour !~ /elf32/);
197         mov     %r10,0(%rdx)
198         mov     %r11,8(%rdx)
199 ___
200 $code.=<<___    if ($flavour =~ /elf32/);
201         mov     %r10d,0(%rdx)
202         mov     %r11d,4(%rdx)
203 ___
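
# The third poly1305_init argument (%rdx above) is a small table that
# receives pointers to the poly1305_blocks/poly1305_emit flavours chosen
# at run time, so the SIMD dispatch happens once at key setup; returning
# 1 (below) presumably tells the C glue that the table was populated.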
204 $code.=<<___;
205         mov     \$1,%eax
206 .Lno_key:
207         ret
208 .size   poly1305_init,.-poly1305_init
209
210 .type   poly1305_blocks,\@function,4
211 .align  32
212 poly1305_blocks:
213 .cfi_startproc
214 .Lblocks:
215         shr     \$4,$len
216         jz      .Lno_data               # too short
217
218         push    %rbx
219 .cfi_push       %rbx
220         push    %rbp
221 .cfi_push       %rbp
222         push    %r12
223 .cfi_push       %r12
224         push    %r13
225 .cfi_push       %r13
226         push    %r14
227 .cfi_push       %r14
228         push    %r15
229 .cfi_push       %r15
230 .Lblocks_body:
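
        # The .cfi_push, .cfi_restore and .cfi_adjust_cfa_offset lines in
        # this file are perlasm-level annotations which x86_64-xlate.pl
        # expands into the corresponding DWARF call-frame directives (and
        # omits for flavours without CFI support); they describe the
        # hand-written prologue/epilogue to unwinders and emit no code.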
231
232         mov     $len,%r15               # reassign $len
233
234         mov     24($ctx),$r0            # load r
235         mov     32($ctx),$s1
236
237         mov     0($ctx),$h0             # load hash value
238         mov     8($ctx),$h1
239         mov     16($ctx),$h2
240
241         mov     $s1,$r1
242         shr     \$2,$s1
243         mov     $r1,%rax
244         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
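        # r1 is clamped to a multiple of 4, so s1 = r1 + (r1>>2) = 5*(r1>>2);
        # since 2^130 = 5 (mod 2^130-5), a product term x*r1*2^128 reduces to
        # x*s1, which is why the high cross products use s1 in place of r1.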
245         jmp     .Loop
246
247 .align  32
248 .Loop:
249         add     0($inp),$h0             # accumulate input
250         adc     8($inp),$h1
251         lea     16($inp),$inp
252         adc     $padbit,$h2
253 ___
254         &poly1305_iteration();
255 $code.=<<___;
256         mov     $r1,%rax
257         dec     %r15                    # len-=16
258         jnz     .Loop
259
260         mov     $h0,0($ctx)             # store hash value
261         mov     $h1,8($ctx)
262         mov     $h2,16($ctx)
263
264         mov     0(%rsp),%r15
265 .cfi_restore    %r15
266         mov     8(%rsp),%r14
267 .cfi_restore    %r14
268         mov     16(%rsp),%r13
269 .cfi_restore    %r13
270         mov     24(%rsp),%r12
271 .cfi_restore    %r12
272         mov     32(%rsp),%rbp
273 .cfi_restore    %rbp
274         mov     40(%rsp),%rbx
275 .cfi_restore    %rbx
276         lea     48(%rsp),%rsp
277 .cfi_adjust_cfa_offset  -48
278 .Lno_data:
279 .Lblocks_epilogue:
280         ret
281 .cfi_endproc
282 .size   poly1305_blocks,.-poly1305_blocks
283
284 .type   poly1305_emit,\@function,3
285 .align  32
286 poly1305_emit:
287 .Lemit:
288         mov     0($ctx),%r8     # load hash value
289         mov     8($ctx),%r9
290         mov     16($ctx),%r10
291
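        # Final reduction: add 5 and see whether the sum reaches bit 130;
        # if it does, h was >= 2^130-5 and the incremented value (with the
        # 2^130 bit dropped) is the canonical residue, picked branchlessly
        # by the cmovnz instructions.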
292         mov     %r8,%rax
293         add     \$5,%r8         # compare to modulus
294         mov     %r9,%rcx
295         adc     \$0,%r9
296         adc     \$0,%r10
297         shr     \$2,%r10        # did 130-bit value overflow?
298         cmovnz  %r8,%rax
299         cmovnz  %r9,%rcx
300
301         add     0($nonce),%rax  # accumulate nonce
302         adc     8($nonce),%rcx
303         mov     %rax,0($mac)    # write result
304         mov     %rcx,8($mac)
305
306         ret
307 .size   poly1305_emit,.-poly1305_emit
308 ___
309 if ($avx) {
310
311 ########################################################################
312 # Layout of opaque area is as follows.
313 #
314 #       unsigned __int32 h[5];          # current hash value base 2^26
315 #       unsigned __int32 is_base2_26;
316 #       unsigned __int64 r[2];          # key value base 2^64
317 #       unsigned __int64 pad;
318 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
319 #
320 # where r^n are the base 2^26 digits of powers of the multiplier key. There
321 # are 5 digits, but the last four are interleaved with their multiples of 5,
322 # totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
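#
# A 130-bit quantity x is thus held as five 26-bit digits x0..x4 with
# x = x0 + x1*2^26 + x2*2^52 + x3*2^78 + x4*2^104; storing 5*r1..5*r4
# next to r1..r4 lets the column products that wrap past 2^130 (the
# h*5*r terms in the d0..d4 formulas further down) come straight from
# the table instead of being scaled on the fly.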
323
324 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
325     map("%xmm$_",(0..15));
326
327 $code.=<<___;
328 .type   __poly1305_block,\@abi-omnipotent
329 .align  32
330 __poly1305_block:
331 ___
332         &poly1305_iteration();
333 $code.=<<___;
334         ret
335 .size   __poly1305_block,.-__poly1305_block
336
337 .type   __poly1305_init_avx,\@abi-omnipotent
338 .align  32
339 __poly1305_init_avx:
340         mov     $r0,$h0
341         mov     $r1,$h1
342         xor     $h2,$h2
343
344         lea     48+64($ctx),$ctx        # size optimization
345
346         mov     $r1,%rax
347         call    __poly1305_block        # r^2
348
349         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
350         mov     \$0x3ffffff,%edx
351         mov     $h0,$d1
352         and     $h0#d,%eax
353         mov     $r0,$d2
354         and     $r0#d,%edx
355         mov     %eax,`16*0+0-64`($ctx)
356         shr     \$26,$d1
357         mov     %edx,`16*0+4-64`($ctx)
358         shr     \$26,$d2
359
360         mov     \$0x3ffffff,%eax
361         mov     \$0x3ffffff,%edx
362         and     $d1#d,%eax
363         and     $d2#d,%edx
364         mov     %eax,`16*1+0-64`($ctx)
365         lea     (%rax,%rax,4),%eax      # *5
366         mov     %edx,`16*1+4-64`($ctx)
367         lea     (%rdx,%rdx,4),%edx      # *5
368         mov     %eax,`16*2+0-64`($ctx)
369         shr     \$26,$d1
370         mov     %edx,`16*2+4-64`($ctx)
371         shr     \$26,$d2
372
373         mov     $h1,%rax
374         mov     $r1,%rdx
375         shl     \$12,%rax
376         shl     \$12,%rdx
377         or      $d1,%rax
378         or      $d2,%rdx
379         and     \$0x3ffffff,%eax
380         and     \$0x3ffffff,%edx
381         mov     %eax,`16*3+0-64`($ctx)
382         lea     (%rax,%rax,4),%eax      # *5
383         mov     %edx,`16*3+4-64`($ctx)
384         lea     (%rdx,%rdx,4),%edx      # *5
385         mov     %eax,`16*4+0-64`($ctx)
386         mov     $h1,$d1
387         mov     %edx,`16*4+4-64`($ctx)
388         mov     $r1,$d2
389
390         mov     \$0x3ffffff,%eax
391         mov     \$0x3ffffff,%edx
392         shr     \$14,$d1
393         shr     \$14,$d2
394         and     $d1#d,%eax
395         and     $d2#d,%edx
396         mov     %eax,`16*5+0-64`($ctx)
397         lea     (%rax,%rax,4),%eax      # *5
398         mov     %edx,`16*5+4-64`($ctx)
399         lea     (%rdx,%rdx,4),%edx      # *5
400         mov     %eax,`16*6+0-64`($ctx)
401         shr     \$26,$d1
402         mov     %edx,`16*6+4-64`($ctx)
403         shr     \$26,$d2
404
405         mov     $h2,%rax
406         shl     \$24,%rax
407         or      %rax,$d1
408         mov     $d1#d,`16*7+0-64`($ctx)
409         lea     ($d1,$d1,4),$d1         # *5
410         mov     $d2#d,`16*7+4-64`($ctx)
411         lea     ($d2,$d2,4),$d2         # *5
412         mov     $d1#d,`16*8+0-64`($ctx)
413         mov     $d2#d,`16*8+4-64`($ctx)
414
415         mov     $r1,%rax
416         call    __poly1305_block        # r^3
417
418         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
419         mov     $h0,$d1
420         and     $h0#d,%eax
421         shr     \$26,$d1
422         mov     %eax,`16*0+12-64`($ctx)
423
424         mov     \$0x3ffffff,%edx
425         and     $d1#d,%edx
426         mov     %edx,`16*1+12-64`($ctx)
427         lea     (%rdx,%rdx,4),%edx      # *5
428         shr     \$26,$d1
429         mov     %edx,`16*2+12-64`($ctx)
430
431         mov     $h1,%rax
432         shl     \$12,%rax
433         or      $d1,%rax
434         and     \$0x3ffffff,%eax
435         mov     %eax,`16*3+12-64`($ctx)
436         lea     (%rax,%rax,4),%eax      # *5
437         mov     $h1,$d1
438         mov     %eax,`16*4+12-64`($ctx)
439
440         mov     \$0x3ffffff,%edx
441         shr     \$14,$d1
442         and     $d1#d,%edx
443         mov     %edx,`16*5+12-64`($ctx)
444         lea     (%rdx,%rdx,4),%edx      # *5
445         shr     \$26,$d1
446         mov     %edx,`16*6+12-64`($ctx)
447
448         mov     $h2,%rax
449         shl     \$24,%rax
450         or      %rax,$d1
451         mov     $d1#d,`16*7+12-64`($ctx)
452         lea     ($d1,$d1,4),$d1         # *5
453         mov     $d1#d,`16*8+12-64`($ctx)
454
455         mov     $r1,%rax
456         call    __poly1305_block        # r^4
457
458         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
459         mov     $h0,$d1
460         and     $h0#d,%eax
461         shr     \$26,$d1
462         mov     %eax,`16*0+8-64`($ctx)
463
464         mov     \$0x3ffffff,%edx
465         and     $d1#d,%edx
466         mov     %edx,`16*1+8-64`($ctx)
467         lea     (%rdx,%rdx,4),%edx      # *5
468         shr     \$26,$d1
469         mov     %edx,`16*2+8-64`($ctx)
470
471         mov     $h1,%rax
472         shl     \$12,%rax
473         or      $d1,%rax
474         and     \$0x3ffffff,%eax
475         mov     %eax,`16*3+8-64`($ctx)
476         lea     (%rax,%rax,4),%eax      # *5
477         mov     $h1,$d1
478         mov     %eax,`16*4+8-64`($ctx)
479
480         mov     \$0x3ffffff,%edx
481         shr     \$14,$d1
482         and     $d1#d,%edx
483         mov     %edx,`16*5+8-64`($ctx)
484         lea     (%rdx,%rdx,4),%edx      # *5
485         shr     \$26,$d1
486         mov     %edx,`16*6+8-64`($ctx)
487
488         mov     $h2,%rax
489         shl     \$24,%rax
490         or      %rax,$d1
491         mov     $d1#d,`16*7+8-64`($ctx)
492         lea     ($d1,$d1,4),$d1         # *5
493         mov     $d1#d,`16*8+8-64`($ctx)
494
495         lea     -48-64($ctx),$ctx       # size [de-]optimization
496         ret
497 .size   __poly1305_init_avx,.-__poly1305_init_avx
498
499 .type   poly1305_blocks_avx,\@function,4
500 .align  32
501 poly1305_blocks_avx:
502 .cfi_startproc
503         mov     20($ctx),%r8d           # is_base2_26
504         cmp     \$128,$len
505         jae     .Lblocks_avx
506         test    %r8d,%r8d
507         jz      .Lblocks
508
509 .Lblocks_avx:
510         and     \$-16,$len
511         jz      .Lno_data_avx
512
513         vzeroupper
514
515         test    %r8d,%r8d
516         jz      .Lbase2_64_avx
517
518         test    \$31,$len
519         jz      .Leven_avx
520
521         push    %rbx
522 .cfi_push       %rbx
523         push    %rbp
524 .cfi_push       %rbp
525         push    %r12
526 .cfi_push       %r12
527         push    %r13
528 .cfi_push       %r13
529         push    %r14
530 .cfi_push       %r14
531         push    %r15
532 .cfi_push       %r15
533 .Lblocks_avx_body:
534
535         mov     $len,%r15               # reassign $len
536
537         mov     0($ctx),$d1             # load hash value
538         mov     8($ctx),$d2
539         mov     16($ctx),$h2#d
540
541         mov     24($ctx),$r0            # load r
542         mov     32($ctx),$s1
543
544         ################################# base 2^26 -> base 2^64
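        # i.e. repack the five 26-bit digits h[0..4] into 64-bit limbs:
        #   lo  = h[0] + (h[1]<<26) + (h[2]<<52)
        #   mid = (h[2]>>12) + (h[3]<<14) + (h[4]<<40)  (+ carry from lo)
        #   hi  = (h[4]>>24)                            (+ carry from mid)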
545         mov     $d1#d,$h0#d
546         and     \$`-1*(1<<31)`,$d1
547         mov     $d2,$r1                 # borrow $r1
548         mov     $d2#d,$h1#d
549         and     \$`-1*(1<<31)`,$d2
550
551         shr     \$6,$d1
552         shl     \$52,$r1
553         add     $d1,$h0
554         shr     \$12,$h1
555         shr     \$18,$d2
556         add     $r1,$h0
557         adc     $d2,$h1
558
559         mov     $h2,$d1
560         shl     \$40,$d1
561         shr     \$24,$h2
562         add     $d1,$h1
563         adc     \$0,$h2                 # can be partially reduced...
564
565         mov     \$-4,$d2                # ... so reduce
566         mov     $h2,$d1
567         and     $h2,$d2
568         shr     \$2,$d1
569         and     \$3,$h2
570         add     $d2,$d1                 # =*5
571         add     $d1,$h0
572         adc     \$0,$h1
573         adc     \$0,$h2
574
575         mov     $s1,$r1
576         mov     $s1,%rax
577         shr     \$2,$s1
578         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
579
580         add     0($inp),$h0             # accumulate input
581         adc     8($inp),$h1
582         lea     16($inp),$inp
583         adc     $padbit,$h2
584
585         call    __poly1305_block
586
587         test    $padbit,$padbit         # if $padbit is zero,
588         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
589
590         ################################# base 2^64 -> base 2^26
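        # i.e. split the 64-bit limbs lo:mid:hi back into 26-bit digits:
        #   h[0] = lo & 0x3ffffff           h[1] = (lo>>26) & 0x3ffffff
        #   h[2] = (lo>>52 | mid<<12) & 0x3ffffff
        #   h[3] = (mid>>14) & 0x3ffffff    h[4] = (mid>>40) | (hi<<24)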
591         mov     $h0,%rax
592         mov     $h0,%rdx
593         shr     \$52,$h0
594         mov     $h1,$r0
595         mov     $h1,$r1
596         shr     \$26,%rdx
597         and     \$0x3ffffff,%rax        # h[0]
598         shl     \$12,$r0
599         and     \$0x3ffffff,%rdx        # h[1]
600         shr     \$14,$h1
601         or      $r0,$h0
602         shl     \$24,$h2
603         and     \$0x3ffffff,$h0         # h[2]
604         shr     \$40,$r1
605         and     \$0x3ffffff,$h1         # h[3]
606         or      $r1,$h2                 # h[4]
607
608         sub     \$16,%r15
609         jz      .Lstore_base2_26_avx
610
611         vmovd   %rax#d,$H0
612         vmovd   %rdx#d,$H1
613         vmovd   $h0#d,$H2
614         vmovd   $h1#d,$H3
615         vmovd   $h2#d,$H4
616         jmp     .Lproceed_avx
617
618 .align  32
619 .Lstore_base2_64_avx:
620         mov     $h0,0($ctx)
621         mov     $h1,8($ctx)
622         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
623         jmp     .Ldone_avx
624
625 .align  16
626 .Lstore_base2_26_avx:
627         mov     %rax#d,0($ctx)          # store hash value base 2^26
628         mov     %rdx#d,4($ctx)
629         mov     $h0#d,8($ctx)
630         mov     $h1#d,12($ctx)
631         mov     $h2#d,16($ctx)
632 .align  16
633 .Ldone_avx:
634         mov     0(%rsp),%r15
635 .cfi_restore    %r15
636         mov     8(%rsp),%r14
637 .cfi_restore    %r14
638         mov     16(%rsp),%r13
639 .cfi_restore    %r13
640         mov     24(%rsp),%r12
641 .cfi_restore    %r12
642         mov     32(%rsp),%rbp
643 .cfi_restore    %rbp
644         mov     40(%rsp),%rbx
645 .cfi_restore    %rbx
646         lea     48(%rsp),%rsp
647 .cfi_adjust_cfa_offset  -48
648 .Lno_data_avx:
649 .Lblocks_avx_epilogue:
650         ret
651 .cfi_endproc
652
653 .align  32
654 .Lbase2_64_avx:
655 .cfi_startproc
656         push    %rbx
657 .cfi_push       %rbx
658         push    %rbp
659 .cfi_push       %rbp
660         push    %r12
661 .cfi_push       %r12
662         push    %r13
663 .cfi_push       %r13
664         push    %r14
665 .cfi_push       %r14
666         push    %r15
667 .cfi_push       %r15
668 .Lbase2_64_avx_body:
669
670         mov     $len,%r15               # reassign $len
671
672         mov     24($ctx),$r0            # load r
673         mov     32($ctx),$s1
674
675         mov     0($ctx),$h0             # load hash value
676         mov     8($ctx),$h1
677         mov     16($ctx),$h2#d
678
679         mov     $s1,$r1
680         mov     $s1,%rax
681         shr     \$2,$s1
682         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
683
684         test    \$31,$len
685         jz      .Linit_avx
686
687         add     0($inp),$h0             # accumulate input
688         adc     8($inp),$h1
689         lea     16($inp),$inp
690         adc     $padbit,$h2
691         sub     \$16,%r15
692
693         call    __poly1305_block
694
695 .Linit_avx:
696         ################################# base 2^64 -> base 2^26
697         mov     $h0,%rax
698         mov     $h0,%rdx
699         shr     \$52,$h0
700         mov     $h1,$d1
701         mov     $h1,$d2
702         shr     \$26,%rdx
703         and     \$0x3ffffff,%rax        # h[0]
704         shl     \$12,$d1
705         and     \$0x3ffffff,%rdx        # h[1]
706         shr     \$14,$h1
707         or      $d1,$h0
708         shl     \$24,$h2
709         and     \$0x3ffffff,$h0         # h[2]
710         shr     \$40,$d2
711         and     \$0x3ffffff,$h1         # h[3]
712         or      $d2,$h2                 # h[4]
713
714         vmovd   %rax#d,$H0
715         vmovd   %rdx#d,$H1
716         vmovd   $h0#d,$H2
717         vmovd   $h1#d,$H3
718         vmovd   $h2#d,$H4
719         movl    \$1,20($ctx)            # set is_base2_26
720
721         call    __poly1305_init_avx
722
723 .Lproceed_avx:
724         mov     %r15,$len
725
726         mov     0(%rsp),%r15
727 .cfi_restore    %r15
728         mov     8(%rsp),%r14
729 .cfi_restore    %r14
730         mov     16(%rsp),%r13
731 .cfi_restore    %r13
732         mov     24(%rsp),%r12
733 .cfi_restore    %r12
734         mov     32(%rsp),%rbp
735 .cfi_restore    %rbp
736         mov     40(%rsp),%rbx
737 .cfi_restore    %rbx
738         lea     48(%rsp),%rax
739         lea     48(%rsp),%rsp
740 .cfi_adjust_cfa_offset  -48
741 .Lbase2_64_avx_epilogue:
742         jmp     .Ldo_avx
743 .cfi_endproc
744
745 .align  32
746 .Leven_avx:
747 .cfi_startproc
748         vmovd           4*0($ctx),$H0           # load hash value
749         vmovd           4*1($ctx),$H1
750         vmovd           4*2($ctx),$H2
751         vmovd           4*3($ctx),$H3
752         vmovd           4*4($ctx),$H4
753
754 .Ldo_avx:
755 ___
756 $code.=<<___    if (!$win64);
757         lea             -0x58(%rsp),%r11
758 .cfi_def_cfa            %r11,0x60
759         sub             \$0x178,%rsp
760 ___
761 $code.=<<___    if ($win64);
762         lea             -0xf8(%rsp),%r11
763         sub             \$0x218,%rsp
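        # xmm6-xmm15 are non-volatile in the Win64 calling convention, so
        # they are saved here and restored in the epilogue.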
764         vmovdqa         %xmm6,0x50(%r11)
765         vmovdqa         %xmm7,0x60(%r11)
766         vmovdqa         %xmm8,0x70(%r11)
767         vmovdqa         %xmm9,0x80(%r11)
768         vmovdqa         %xmm10,0x90(%r11)
769         vmovdqa         %xmm11,0xa0(%r11)
770         vmovdqa         %xmm12,0xb0(%r11)
771         vmovdqa         %xmm13,0xc0(%r11)
772         vmovdqa         %xmm14,0xd0(%r11)
773         vmovdqa         %xmm15,0xe0(%r11)
774 .Ldo_avx_body:
775 ___
776 $code.=<<___;
777         sub             \$64,$len
778         lea             -32($inp),%rax
779         cmovc           %rax,$inp
780
781         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
782         lea             `16*3+64`($ctx),$ctx    # size optimization
783         lea             .Lconst(%rip),%rcx
784
785         ################################################################
786         # load input
787         vmovdqu         16*2($inp),$T0
788         vmovdqu         16*3($inp),$T1
789         vmovdqa         64(%rcx),$MASK          # .Lmask26
790
791         vpsrldq         \$6,$T0,$T2             # splat input
792         vpsrldq         \$6,$T1,$T3
793         vpunpckhqdq     $T1,$T0,$T4             # 4
794         vpunpcklqdq     $T1,$T0,$T0             # 0:1
795         vpunpcklqdq     $T3,$T2,$T3             # 2:3
796
797         vpsrlq          \$40,$T4,$T4            # 4
798         vpsrlq          \$26,$T0,$T1
799         vpand           $MASK,$T0,$T0           # 0
800         vpsrlq          \$4,$T3,$T2
801         vpand           $MASK,$T1,$T1           # 1
802         vpsrlq          \$30,$T3,$T3
803         vpand           $MASK,$T2,$T2           # 2
804         vpand           $MASK,$T3,$T3           # 3
805         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
806
807         jbe             .Lskip_loop_avx
808
809         # expand and copy pre-calculated table to stack
810         vmovdqu         `16*1-64`($ctx),$D1
811         vmovdqu         `16*2-64`($ctx),$D2
812         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
813         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
814         vmovdqa         $D3,-0x90(%r11)
815         vmovdqa         $D0,0x00(%rsp)
816         vpshufd         \$0xEE,$D1,$D4
817         vmovdqu         `16*3-64`($ctx),$D0
818         vpshufd         \$0x44,$D1,$D1
819         vmovdqa         $D4,-0x80(%r11)
820         vmovdqa         $D1,0x10(%rsp)
821         vpshufd         \$0xEE,$D2,$D3
822         vmovdqu         `16*4-64`($ctx),$D1
823         vpshufd         \$0x44,$D2,$D2
824         vmovdqa         $D3,-0x70(%r11)
825         vmovdqa         $D2,0x20(%rsp)
826         vpshufd         \$0xEE,$D0,$D4
827         vmovdqu         `16*5-64`($ctx),$D2
828         vpshufd         \$0x44,$D0,$D0
829         vmovdqa         $D4,-0x60(%r11)
830         vmovdqa         $D0,0x30(%rsp)
831         vpshufd         \$0xEE,$D1,$D3
832         vmovdqu         `16*6-64`($ctx),$D0
833         vpshufd         \$0x44,$D1,$D1
834         vmovdqa         $D3,-0x50(%r11)
835         vmovdqa         $D1,0x40(%rsp)
836         vpshufd         \$0xEE,$D2,$D4
837         vmovdqu         `16*7-64`($ctx),$D1
838         vpshufd         \$0x44,$D2,$D2
839         vmovdqa         $D4,-0x40(%r11)
840         vmovdqa         $D2,0x50(%rsp)
841         vpshufd         \$0xEE,$D0,$D3
842         vmovdqu         `16*8-64`($ctx),$D2
843         vpshufd         \$0x44,$D0,$D0
844         vmovdqa         $D3,-0x30(%r11)
845         vmovdqa         $D0,0x60(%rsp)
846         vpshufd         \$0xEE,$D1,$D4
847         vpshufd         \$0x44,$D1,$D1
848         vmovdqa         $D4,-0x20(%r11)
849         vmovdqa         $D1,0x70(%rsp)
850         vpshufd         \$0xEE,$D2,$D3
851          vmovdqa        0x00(%rsp),$D4          # preload r0^2
852         vpshufd         \$0x44,$D2,$D2
853         vmovdqa         $D3,-0x10(%r11)
854         vmovdqa         $D2,0x80(%rsp)
855
856         jmp             .Loop_avx
857
858 .align  32
859 .Loop_avx:
860         ################################################################
861         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
862         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
863         #   \___________________/
864         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
865         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
866         #   \___________________/ \____________________/
867         #
868         # Note that we start with inp[2:3]*r^2. This is because it
869         # doesn't depend on the reduction in the previous iteration.
870         ################################################################
871         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
872         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
873         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
874         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
875         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
876         #
877         # though note that $Tx and $Hx are "reversed" in this section,
878         # and $D4 is preloaded with r0^2...
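        # Put differently, absorbing four blocks per iteration computes
        #   h = ((((h+inp[0])*r + inp[1])*r + inp[2])*r + inp[3])*r
        #     = (h+inp[0])*r^4 + inp[1]*r^3 + inp[2]*r^2 + inp[3]*r,
        # with one vector lane carrying the even-indexed blocks (powers
        # r^4, r^2) and the other the odd-indexed ones (powers r^3, r),
        # so the lanes only need to be added together at the very end.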
879
880         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
881         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
882           vmovdqa       $H2,0x20(%r11)                          # offload hash
883         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
884          vmovdqa        0x10(%rsp),$H2          # r1^2
885         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
886         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
887
888           vmovdqa       $H0,0x00(%r11)                          #
889         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
890           vmovdqa       $H1,0x10(%r11)                          #
891         vpmuludq        $T3,$H2,$H1             # h3*r1
892         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
893         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
894           vmovdqa       $H3,0x30(%r11)                          #
895         vpmuludq        $T2,$H2,$H0             # h2*r1
896         vpmuludq        $T1,$H2,$H1             # h1*r1
897         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
898          vmovdqa        0x30(%rsp),$H3          # r2^2
899         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
900           vmovdqa       $H4,0x40(%r11)                          #
901         vpmuludq        $T0,$H2,$H2             # h0*r1
902          vpmuludq       $T2,$H3,$H0             # h2*r2
903         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
904
905          vmovdqa        0x40(%rsp),$H4          # s2^2
906         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
907         vpmuludq        $T1,$H3,$H1             # h1*r2
908         vpmuludq        $T0,$H3,$H3             # h0*r2
909         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
910          vmovdqa        0x50(%rsp),$H2          # r3^2
911         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
912         vpmuludq        $T4,$H4,$H0             # h4*s2
913         vpmuludq        $T3,$H4,$H4             # h3*s2
914         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
915          vmovdqa        0x60(%rsp),$H3          # s3^2
916         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
917
918          vmovdqa        0x80(%rsp),$H4          # s4^2
919         vpmuludq        $T1,$H2,$H1             # h1*r3
920         vpmuludq        $T0,$H2,$H2             # h0*r3
921         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
922         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
923         vpmuludq        $T4,$H3,$H0             # h4*s3
924         vpmuludq        $T3,$H3,$H1             # h3*s3
925         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
926          vmovdqu        16*0($inp),$H0                          # load input
927         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
928         vpmuludq        $T2,$H3,$H3             # h2*s3
929          vpmuludq       $T2,$H4,$T2             # h2*s4
930         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
931
932          vmovdqu        16*1($inp),$H1                          #
933         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
934         vpmuludq        $T3,$H4,$T3             # h3*s4
935         vpmuludq        $T4,$H4,$T4             # h4*s4
936          vpsrldq        \$6,$H0,$H2                             # splat input
937         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
938         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
939          vpsrldq        \$6,$H1,$H3                             #
940         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
941         vpmuludq        $T1,$H4,$T0             # h1*s4
942          vpunpckhqdq    $H1,$H0,$H4             # 4
943         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
944          vmovdqa        -0x90(%r11),$T4         # r0^4
945         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
946
947         vpunpcklqdq     $H1,$H0,$H0             # 0:1
948         vpunpcklqdq     $H3,$H2,$H3             # 2:3
949
950         #vpsrlq         \$40,$H4,$H4            # 4
951         vpsrldq         \$`40/8`,$H4,$H4        # 4
952         vpsrlq          \$26,$H0,$H1
953         vpand           $MASK,$H0,$H0           # 0
954         vpsrlq          \$4,$H3,$H2
955         vpand           $MASK,$H1,$H1           # 1
956         vpand           0(%rcx),$H4,$H4         # .Lmask24
957         vpsrlq          \$30,$H3,$H3
958         vpand           $MASK,$H2,$H2           # 2
959         vpand           $MASK,$H3,$H3           # 3
960         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
961
962         vpaddq          0x00(%r11),$H0,$H0      # add hash value
963         vpaddq          0x10(%r11),$H1,$H1
964         vpaddq          0x20(%r11),$H2,$H2
965         vpaddq          0x30(%r11),$H3,$H3
966         vpaddq          0x40(%r11),$H4,$H4
967
968         lea             16*2($inp),%rax
969         lea             16*4($inp),$inp
970         sub             \$64,$len
971         cmovc           %rax,$inp
972
973         ################################################################
974         # Now we accumulate (inp[0:1]+hash)*r^4
975         ################################################################
976         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
977         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
978         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
979         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
980         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
981
982         vpmuludq        $H0,$T4,$T0             # h0*r0
983         vpmuludq        $H1,$T4,$T1             # h1*r0
984         vpaddq          $T0,$D0,$D0
985         vpaddq          $T1,$D1,$D1
986          vmovdqa        -0x80(%r11),$T2         # r1^4
987         vpmuludq        $H2,$T4,$T0             # h2*r0
988         vpmuludq        $H3,$T4,$T1             # h3*r0
989         vpaddq          $T0,$D2,$D2
990         vpaddq          $T1,$D3,$D3
991         vpmuludq        $H4,$T4,$T4             # h4*r0
992          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
993         vpaddq          $T4,$D4,$D4
994
995         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
996         vpmuludq        $H2,$T2,$T1             # h2*r1
997         vpmuludq        $H3,$T2,$T0             # h3*r1
998         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
999          vmovdqa        -0x60(%r11),$T3         # r2^4
1000         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1001         vpmuludq        $H1,$T2,$T1             # h1*r1
1002         vpmuludq        $H0,$T2,$T2             # h0*r1
1003         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
1004         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1005
1006          vmovdqa        -0x50(%r11),$T4         # s2^4
1007         vpmuludq        $H2,$T3,$T0             # h2*r2
1008         vpmuludq        $H1,$T3,$T1             # h1*r2
1009         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
1010         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
1011          vmovdqa        -0x40(%r11),$T2         # r3^4
1012         vpmuludq        $H0,$T3,$T3             # h0*r2
1013         vpmuludq        $H4,$T4,$T0             # h4*s2
1014         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
1015         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
1016          vmovdqa        -0x30(%r11),$T3         # s3^4
1017         vpmuludq        $H3,$T4,$T4             # h3*s2
1018          vpmuludq       $H1,$T2,$T1             # h1*r3
1019         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1020
1021          vmovdqa        -0x10(%r11),$T4         # s4^4
1022         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
1023         vpmuludq        $H0,$T2,$T2             # h0*r3
1024         vpmuludq        $H4,$T3,$T0             # h4*s3
1025         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
1026         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
1027          vmovdqu        16*2($inp),$T0                          # load input
1028         vpmuludq        $H3,$T3,$T2             # h3*s3
1029         vpmuludq        $H2,$T3,$T3             # h2*s3
1030         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
1031          vmovdqu        16*3($inp),$T1                          #
1032         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
1033
1034         vpmuludq        $H2,$T4,$H2             # h2*s4
1035         vpmuludq        $H3,$T4,$H3             # h3*s4
1036          vpsrldq        \$6,$T0,$T2                             # splat input
1037         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
1038         vpmuludq        $H4,$T4,$H4             # h4*s4
1039          vpsrldq        \$6,$T1,$T3                             #
1040         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1041         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1042         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
1043         vpmuludq        $H1,$T4,$H0
1044          vpunpckhqdq    $T1,$T0,$T4             # 4
1045         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1046         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1047
1048         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1049         vpunpcklqdq     $T3,$T2,$T3             # 2:3
1050
1051         #vpsrlq         \$40,$T4,$T4            # 4
1052         vpsrldq         \$`40/8`,$T4,$T4        # 4
1053         vpsrlq          \$26,$T0,$T1
1054          vmovdqa        0x00(%rsp),$D4          # preload r0^2
1055         vpand           $MASK,$T0,$T0           # 0
1056         vpsrlq          \$4,$T3,$T2
1057         vpand           $MASK,$T1,$T1           # 1
1058         vpand           0(%rcx),$T4,$T4         # .Lmask24
1059         vpsrlq          \$30,$T3,$T3
1060         vpand           $MASK,$T2,$T2           # 2
1061         vpand           $MASK,$T3,$T3           # 3
1062         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1063
1064         ################################################################
1065         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
1066         # and P. Schwabe
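        # Only one carry is propagated out of each digit here, and the carry
        # out of h4 re-enters h0 multiplied by 5 (the add plus shift-by-2 add
        # below), so digits may stay slightly above 26 bits between iterations;
        # the 64-bit accumulators absorb that slack and the exact reduction is
        # left to poly1305_emit.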
1067
1068         vpsrlq          \$26,$H3,$D3
1069         vpand           $MASK,$H3,$H3
1070         vpaddq          $D3,$H4,$H4             # h3 -> h4
1071
1072         vpsrlq          \$26,$H0,$D0
1073         vpand           $MASK,$H0,$H0
1074         vpaddq          $D0,$D1,$H1             # h0 -> h1
1075
1076         vpsrlq          \$26,$H4,$D0
1077         vpand           $MASK,$H4,$H4
1078
1079         vpsrlq          \$26,$H1,$D1
1080         vpand           $MASK,$H1,$H1
1081         vpaddq          $D1,$H2,$H2             # h1 -> h2
1082
1083         vpaddq          $D0,$H0,$H0
1084         vpsllq          \$2,$D0,$D0
1085         vpaddq          $D0,$H0,$H0             # h4 -> h0
1086
1087         vpsrlq          \$26,$H2,$D2
1088         vpand           $MASK,$H2,$H2
1089         vpaddq          $D2,$H3,$H3             # h2 -> h3
1090
1091         vpsrlq          \$26,$H0,$D0
1092         vpand           $MASK,$H0,$H0
1093         vpaddq          $D0,$H1,$H1             # h0 -> h1
1094
1095         vpsrlq          \$26,$H3,$D3
1096         vpand           $MASK,$H3,$H3
1097         vpaddq          $D3,$H4,$H4             # h3 -> h4
1098
1099         ja              .Loop_avx
1100
1101 .Lskip_loop_avx:
1102         ################################################################
1103         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1104
1105         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1106         add             \$32,$len
1107         jnz             .Long_tail_avx
1108
1109         vpaddq          $H2,$T2,$T2
1110         vpaddq          $H0,$T0,$T0
1111         vpaddq          $H1,$T1,$T1
1112         vpaddq          $H3,$T3,$T3
1113         vpaddq          $H4,$T4,$T4
1114
1115 .Long_tail_avx:
1116         vmovdqa         $H2,0x20(%r11)
1117         vmovdqa         $H0,0x00(%r11)
1118         vmovdqa         $H1,0x10(%r11)
1119         vmovdqa         $H3,0x30(%r11)
1120         vmovdqa         $H4,0x40(%r11)
1121
1122         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1123         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1124         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1125         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1126         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1127
1128         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1129         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1130          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1131         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1132         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1133         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1134
1135         vpmuludq        $T3,$H2,$H0             # h3*r1
1136         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1137          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1138         vpmuludq        $T2,$H2,$H1             # h2*r1
1139         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1140          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1141         vpmuludq        $T1,$H2,$H0             # h1*r1
1142         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1143         vpmuludq        $T0,$H2,$H2             # h0*r1
1144         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1145         vpmuludq        $T4,$H3,$H3             # h4*s1
1146         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1147
1148          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1149         vpmuludq        $T2,$H4,$H1             # h2*r2
1150         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1151         vpmuludq        $T1,$H4,$H0             # h1*r2
1152         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1153          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1154         vpmuludq        $T0,$H4,$H4             # h0*r2
1155         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1156         vpmuludq        $T4,$H2,$H1             # h4*s2
1157         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1158          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1159         vpmuludq        $T3,$H2,$H2             # h3*s2
1160         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1161
1162         vpmuludq        $T1,$H3,$H0             # h1*r3
1163         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1164         vpmuludq        $T0,$H3,$H3             # h0*r3
1165         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1166          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1167         vpmuludq        $T4,$H4,$H1             # h4*s3
1168         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1169          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1170         vpmuludq        $T3,$H4,$H0             # h3*s3
1171         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1172         vpmuludq        $T2,$H4,$H4             # h2*s3
1173         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1174
1175         vpmuludq        $T0,$H2,$H2             # h0*r4
1176         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1177         vpmuludq        $T4,$H3,$H1             # h4*s4
1178         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1179         vpmuludq        $T3,$H3,$H0             # h3*s4
1180         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1181         vpmuludq        $T2,$H3,$H1             # h2*s4
1182         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1183         vpmuludq        $T1,$H3,$H3             # h1*s4
1184         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1185
1186         jz              .Lshort_tail_avx
1187
1188         vmovdqu         16*0($inp),$H0          # load input
1189         vmovdqu         16*1($inp),$H1
1190
1191         vpsrldq         \$6,$H0,$H2             # splat input
1192         vpsrldq         \$6,$H1,$H3
1193         vpunpckhqdq     $H1,$H0,$H4             # 4
1194         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1195         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1196
1197         vpsrlq          \$40,$H4,$H4            # 4
1198         vpsrlq          \$26,$H0,$H1
1199         vpand           $MASK,$H0,$H0           # 0
1200         vpsrlq          \$4,$H3,$H2
1201         vpand           $MASK,$H1,$H1           # 1
1202         vpsrlq          \$30,$H3,$H3
1203         vpand           $MASK,$H2,$H2           # 2
1204         vpand           $MASK,$H3,$H3           # 3
1205         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1206
1207         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1208         vpaddq          0x00(%r11),$H0,$H0
1209         vpaddq          0x10(%r11),$H1,$H1
1210         vpaddq          0x20(%r11),$H2,$H2
1211         vpaddq          0x30(%r11),$H3,$H3
1212         vpaddq          0x40(%r11),$H4,$H4
1213
1214         ################################################################
1215         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1216
1217         vpmuludq        $H0,$T4,$T0             # h0*r0
1218         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1219         vpmuludq        $H1,$T4,$T1             # h1*r0
1220         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1221         vpmuludq        $H2,$T4,$T0             # h2*r0
1222         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1223          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1224         vpmuludq        $H3,$T4,$T1             # h3*r0
1225         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1226         vpmuludq        $H4,$T4,$T4             # h4*r0
1227         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1228
1229         vpmuludq        $H3,$T2,$T0             # h3*r1
1230         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1231          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1232         vpmuludq        $H2,$T2,$T1             # h2*r1
1233         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1234          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1235         vpmuludq        $H1,$T2,$T0             # h1*r1
1236         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1237         vpmuludq        $H0,$T2,$T2             # h0*r1
1238         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1239         vpmuludq        $H4,$T3,$T3             # h4*s1
1240         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1241
1242          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1243         vpmuludq        $H2,$T4,$T1             # h2*r2
1244         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1245         vpmuludq        $H1,$T4,$T0             # h1*r2
1246         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1247          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1248         vpmuludq        $H0,$T4,$T4             # h0*r2
1249         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1250         vpmuludq        $H4,$T2,$T1             # h4*s2
1251         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1252          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1253         vpmuludq        $H3,$T2,$T2             # h3*s2
1254         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1255
1256         vpmuludq        $H1,$T3,$T0             # h1*r3
1257         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1258         vpmuludq        $H0,$T3,$T3             # h0*r3
1259         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1260          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1261         vpmuludq        $H4,$T4,$T1             # h4*s3
1262         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1263          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1264         vpmuludq        $H3,$T4,$T0             # h3*s3
1265         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1266         vpmuludq        $H2,$T4,$T4             # h2*s3
1267         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1268
1269         vpmuludq        $H0,$T2,$T2             # h0*r4
1270         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1271         vpmuludq        $H4,$T3,$T1             # h4*s4
1272         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1273         vpmuludq        $H3,$T3,$T0             # h3*s4
1274         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1275         vpmuludq        $H2,$T3,$T1             # h2*s4
1276         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1277         vpmuludq        $H1,$T3,$T3             # h1*s4
1278         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1279
1280 .Lshort_tail_avx:
1281         ################################################################
1282         # horizontal addition
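        # i.e. add the upper 64-bit half of each accumulator into its lower
        # half so the two lanes collapse into a single value per digit.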
1283
1284         vpsrldq         \$8,$D4,$T4
1285         vpsrldq         \$8,$D3,$T3
1286         vpsrldq         \$8,$D1,$T1
1287         vpsrldq         \$8,$D0,$T0
1288         vpsrldq         \$8,$D2,$T2
1289         vpaddq          $T3,$D3,$D3
1290         vpaddq          $T4,$D4,$D4
1291         vpaddq          $T0,$D0,$D0
1292         vpaddq          $T1,$D1,$D1
1293         vpaddq          $T2,$D2,$D2
1294
1295         ################################################################
1296         # lazy reduction
1297
1298         vpsrlq          \$26,$D3,$H3
1299         vpand           $MASK,$D3,$D3
1300         vpaddq          $H3,$D4,$D4             # h3 -> h4
1301
1302         vpsrlq          \$26,$D0,$H0
1303         vpand           $MASK,$D0,$D0
1304         vpaddq          $H0,$D1,$D1             # h0 -> h1
1305
1306         vpsrlq          \$26,$D4,$H4
1307         vpand           $MASK,$D4,$D4
1308
1309         vpsrlq          \$26,$D1,$H1
1310         vpand           $MASK,$D1,$D1
1311         vpaddq          $H1,$D2,$D2             # h1 -> h2
1312
1313         vpaddq          $H4,$D0,$D0
1314         vpsllq          \$2,$H4,$H4
1315         vpaddq          $H4,$D0,$D0             # h4 -> h0
1316
1317         vpsrlq          \$26,$D2,$H2
1318         vpand           $MASK,$D2,$D2
1319         vpaddq          $H2,$D3,$D3             # h2 -> h3
1320
1321         vpsrlq          \$26,$D0,$H0
1322         vpand           $MASK,$D0,$D0
1323         vpaddq          $H0,$D1,$D1             # h0 -> h1
1324
1325         vpsrlq          \$26,$D3,$H3
1326         vpand           $MASK,$D3,$D3
1327         vpaddq          $H3,$D4,$D4             # h3 -> h4
1328
1329         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1330         vmovd           $D1,`4*1-48-64`($ctx)
1331         vmovd           $D2,`4*2-48-64`($ctx)
1332         vmovd           $D3,`4*3-48-64`($ctx)
1333         vmovd           $D4,`4*4-48-64`($ctx)
1334 ___
1335 $code.=<<___    if ($win64);
1336         vmovdqa         0x50(%r11),%xmm6
1337         vmovdqa         0x60(%r11),%xmm7
1338         vmovdqa         0x70(%r11),%xmm8
1339         vmovdqa         0x80(%r11),%xmm9
1340         vmovdqa         0x90(%r11),%xmm10
1341         vmovdqa         0xa0(%r11),%xmm11
1342         vmovdqa         0xb0(%r11),%xmm12
1343         vmovdqa         0xc0(%r11),%xmm13
1344         vmovdqa         0xd0(%r11),%xmm14
1345         vmovdqa         0xe0(%r11),%xmm15
1346         lea             0xf8(%r11),%rsp
1347 .Ldo_avx_epilogue:
1348 ___
1349 $code.=<<___    if (!$win64);
1350         lea             0x58(%r11),%rsp
1351 .cfi_def_cfa            %rsp,8
1352 ___
1353 $code.=<<___;
1354         vzeroupper
1355         ret
1356 .cfi_endproc
1357 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1358
1359 .type   poly1305_emit_avx,\@function,3
1360 .align  32
1361 poly1305_emit_avx:
1362         cmpl    \$0,20($ctx)    # is_base2_26?
1363         je      .Lemit
1364
1365         mov     0($ctx),%eax    # load hash value base 2^26
1366         mov     4($ctx),%ecx
1367         mov     8($ctx),%r8d
1368         mov     12($ctx),%r11d
1369         mov     16($ctx),%r10d
1370
1371         shl     \$26,%rcx       # base 2^26 -> base 2^64
1372         mov     %r8,%r9
1373         shl     \$52,%r8
1374         add     %rcx,%rax
1375         shr     \$12,%r9
1376         add     %rax,%r8        # h0
1377         adc     \$0,%r9
1378
1379         shl     \$14,%r11
1380         mov     %r10,%rax
1381         shr     \$24,%r10
1382         add     %r11,%r9
1383         shl     \$40,%rax
1384         add     %rax,%r9        # h1
1385         adc     \$0,%r10        # h2
1386
1387         mov     %r10,%rax       # could be partially reduced, so reduce
1388         mov     %r10,%rcx
1389         and     \$3,%r10
1390         shr     \$2,%rax
1391         and     \$-4,%rcx
1392         add     %rcx,%rax
1393         add     %rax,%r8
1394         adc     \$0,%r9
1395         adc     \$0,%r10
1396
1397         mov     %r8,%rax
1398         add     \$5,%r8         # compare to modulus
1399         mov     %r9,%rcx
1400         adc     \$0,%r9
1401         adc     \$0,%r10
1402         shr     \$2,%r10        # did 130-bit value overflow?
1403         cmovnz  %r8,%rax
1404         cmovnz  %r9,%rcx
1405
1406         add     0($nonce),%rax  # accumulate nonce
1407         adc     8($nonce),%rcx
1408         mov     %rax,0($mac)    # write result
1409         mov     %rcx,8($mac)
1410
1411         ret
1412 .size   poly1305_emit_avx,.-poly1305_emit_avx
1413 ___
1414
1415 if ($avx>1) {
1416 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1417     map("%ymm$_",(0..15));
1418 my $S4=$MASK;
1419
1420 $code.=<<___;
1421 .type   poly1305_blocks_avx2,\@function,4
1422 .align  32
1423 poly1305_blocks_avx2:
1424 .cfi_startproc
1425         mov     20($ctx),%r8d           # is_base2_26
1426         cmp     \$128,$len
1427         jae     .Lblocks_avx2
1428         test    %r8d,%r8d
1429         jz      .Lblocks
1430
1431 .Lblocks_avx2:
1432         and     \$-16,$len
1433         jz      .Lno_data_avx2
1434
1435         vzeroupper
1436
1437         test    %r8d,%r8d
1438         jz      .Lbase2_64_avx2
1439
1440         test    \$63,$len
1441         jz      .Leven_avx2
1442
1443         push    %rbx
1444 .cfi_push       %rbx
1445         push    %rbp
1446 .cfi_push       %rbp
1447         push    %r12
1448 .cfi_push       %r12
1449         push    %r13
1450 .cfi_push       %r13
1451         push    %r14
1452 .cfi_push       %r14
1453         push    %r15
1454 .cfi_push       %r15
1455 .Lblocks_avx2_body:
1456
1457         mov     $len,%r15               # reassign $len
1458
1459         mov     0($ctx),$d1             # load hash value
1460         mov     8($ctx),$d2
1461         mov     16($ctx),$h2#d
1462
1463         mov     24($ctx),$r0            # load r
1464         mov     32($ctx),$s1
1465
1466         ################################# base 2^26 -> base 2^64
1467         mov     $d1#d,$h0#d
1468         and     \$`-1*(1<<31)`,$d1
1469         mov     $d2,$r1                 # borrow $r1
1470         mov     $d2#d,$h1#d
1471         and     \$`-1*(1<<31)`,$d2
1472
1473         shr     \$6,$d1
1474         shl     \$52,$r1
1475         add     $d1,$h0
1476         shr     \$12,$h1
1477         shr     \$18,$d2
1478         add     $r1,$h0
1479         adc     $d2,$h1
1480
1481         mov     $h2,$d1
1482         shl     \$40,$d1
1483         shr     \$24,$h2
1484         add     $d1,$h1
1485         adc     \$0,$h2                 # can be partially reduced...
1486
1487         mov     \$-4,$d2                # ... so reduce
1488         mov     $h2,$d1
1489         and     $h2,$d2
1490         shr     \$2,$d1
1491         and     \$3,$h2
1492         add     $d2,$d1                 # =*5
1493         add     $d1,$h0
1494         adc     \$0,$h1
1495         adc     \$0,$h2
1496
1497         mov     $s1,$r1
1498         mov     $s1,%rax
1499         shr     \$2,$s1
1500         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1501
1502 .Lbase2_26_pre_avx2:
1503         add     0($inp),$h0             # accumulate input
1504         adc     8($inp),$h1
1505         lea     16($inp),$inp
1506         adc     $padbit,$h2
1507         sub     \$16,%r15
1508
1509         call    __poly1305_block
1510         mov     $r1,%rax
1511
1512         test    \$63,%r15
1513         jnz     .Lbase2_26_pre_avx2
1514
1515         test    $padbit,$padbit         # if $padbit is zero,
1516         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1517
1518         ################################# base 2^64 -> base 2^26
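        # a C sketch of this repacking, with lo/hi/top standing for the
        # three base 2^64 words held in h0/h1/h2 above (illustrative only):
        #
        #       t0 =   lo         & 0x3ffffff;
        #       t1 =  (lo >> 26)  & 0x3ffffff;
        #       t2 = ((lo >> 52) | (hi << 12)) & 0x3ffffff;
        #       t3 =  (hi >> 14)  & 0x3ffffff;
        #       t4 =  (hi >> 40) | (top << 24);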
1519         mov     $h0,%rax
1520         mov     $h0,%rdx
1521         shr     \$52,$h0
1522         mov     $h1,$r0
1523         mov     $h1,$r1
1524         shr     \$26,%rdx
1525         and     \$0x3ffffff,%rax        # h[0]
1526         shl     \$12,$r0
1527         and     \$0x3ffffff,%rdx        # h[1]
1528         shr     \$14,$h1
1529         or      $r0,$h0
1530         shl     \$24,$h2
1531         and     \$0x3ffffff,$h0         # h[2]
1532         shr     \$40,$r1
1533         and     \$0x3ffffff,$h1         # h[3]
1534         or      $r1,$h2                 # h[4]
1535
1536         test    %r15,%r15
1537         jz      .Lstore_base2_26_avx2
1538
1539         vmovd   %rax#d,%x#$H0
1540         vmovd   %rdx#d,%x#$H1
1541         vmovd   $h0#d,%x#$H2
1542         vmovd   $h1#d,%x#$H3
1543         vmovd   $h2#d,%x#$H4
1544         jmp     .Lproceed_avx2
1545
1546 .align  32
1547 .Lstore_base2_64_avx2:
1548         mov     $h0,0($ctx)
1549         mov     $h1,8($ctx)
1550         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1551         jmp     .Ldone_avx2
1552
1553 .align  16
1554 .Lstore_base2_26_avx2:
1555         mov     %rax#d,0($ctx)          # store hash value base 2^26
1556         mov     %rdx#d,4($ctx)
1557         mov     $h0#d,8($ctx)
1558         mov     $h1#d,12($ctx)
1559         mov     $h2#d,16($ctx)
1560 .align  16
1561 .Ldone_avx2:
1562         mov     0(%rsp),%r15
1563 .cfi_restore    %r15
1564         mov     8(%rsp),%r14
1565 .cfi_restore    %r14
1566         mov     16(%rsp),%r13
1567 .cfi_restore    %r13
1568         mov     24(%rsp),%r12
1569 .cfi_restore    %r12
1570         mov     32(%rsp),%rbp
1571 .cfi_restore    %rbp
1572         mov     40(%rsp),%rbx
1573 .cfi_restore    %rbx
1574         lea     48(%rsp),%rsp
1575 .cfi_adjust_cfa_offset  -48
1576 .Lno_data_avx2:
1577 .Lblocks_avx2_epilogue:
1578         ret
1579 .cfi_endproc
1580
1581 .align  32
1582 .Lbase2_64_avx2:
1583 .cfi_startproc
1584         push    %rbx
1585 .cfi_push       %rbx
1586         push    %rbp
1587 .cfi_push       %rbp
1588         push    %r12
1589 .cfi_push       %r12
1590         push    %r13
1591 .cfi_push       %r13
1592         push    %r14
1593 .cfi_push       %r14
1594         push    %r15
1595 .cfi_push       %r15
1596 .Lbase2_64_avx2_body:
1597
1598         mov     $len,%r15               # reassign $len
1599
1600         mov     24($ctx),$r0            # load r
1601         mov     32($ctx),$s1
1602
1603         mov     0($ctx),$h0             # load hash value
1604         mov     8($ctx),$h1
1605         mov     16($ctx),$h2#d
1606
1607         mov     $s1,$r1
1608         mov     $s1,%rax
1609         shr     \$2,$s1
1610         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1611
1612         test    \$63,$len
1613         jz      .Linit_avx2
1614
1615 .Lbase2_64_pre_avx2:
1616         add     0($inp),$h0             # accumulate input
1617         adc     8($inp),$h1
1618         lea     16($inp),$inp
1619         adc     $padbit,$h2
1620         sub     \$16,%r15
1621
1622         call    __poly1305_block
1623         mov     $r1,%rax
1624
1625         test    \$63,%r15
1626         jnz     .Lbase2_64_pre_avx2
1627
1628 .Linit_avx2:
1629         ################################# base 2^64 -> base 2^26
1630         mov     $h0,%rax
1631         mov     $h0,%rdx
1632         shr     \$52,$h0
1633         mov     $h1,$d1
1634         mov     $h1,$d2
1635         shr     \$26,%rdx
1636         and     \$0x3ffffff,%rax        # h[0]
1637         shl     \$12,$d1
1638         and     \$0x3ffffff,%rdx        # h[1]
1639         shr     \$14,$h1
1640         or      $d1,$h0
1641         shl     \$24,$h2
1642         and     \$0x3ffffff,$h0         # h[2]
1643         shr     \$40,$d2
1644         and     \$0x3ffffff,$h1         # h[3]
1645         or      $d2,$h2                 # h[4]
1646
1647         vmovd   %rax#d,%x#$H0
1648         vmovd   %rdx#d,%x#$H1
1649         vmovd   $h0#d,%x#$H2
1650         vmovd   $h1#d,%x#$H3
1651         vmovd   $h2#d,%x#$H4
1652         movl    \$1,20($ctx)            # set is_base2_26
1653
1654         call    __poly1305_init_avx
1655
1656 .Lproceed_avx2:
1657         mov     %r15,$len                       # restore $len
1658         mov     OPENSSL_ia32cap_P+8(%rip),%r10d
1659         mov     \$`(1<<31|1<<30|1<<16)`,%r11d
1660
1661         mov     0(%rsp),%r15
1662 .cfi_restore    %r15
1663         mov     8(%rsp),%r14
1664 .cfi_restore    %r14
1665         mov     16(%rsp),%r13
1666 .cfi_restore    %r13
1667         mov     24(%rsp),%r12
1668 .cfi_restore    %r12
1669         mov     32(%rsp),%rbp
1670 .cfi_restore    %rbp
1671         mov     40(%rsp),%rbx
1672 .cfi_restore    %rbx
1673         lea     48(%rsp),%rax
1674         lea     48(%rsp),%rsp
1675 .cfi_adjust_cfa_offset  -48
1676 .Lbase2_64_avx2_epilogue:
1677         jmp     .Ldo_avx2
1678 .cfi_endproc
1679
1680 .align  32
1681 .Leven_avx2:
1682 .cfi_startproc
1683         mov             OPENSSL_ia32cap_P+8(%rip),%r10d
1684         mov             \$`(1<<31|1<<30|1<<16)`,%r11d
1685         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1686         vmovd           4*1($ctx),%x#$H1
1687         vmovd           4*2($ctx),%x#$H2
1688         vmovd           4*3($ctx),%x#$H3
1689         vmovd           4*4($ctx),%x#$H4
1690
1691 .Ldo_avx2:
1692 ___
1693 $code.=<<___            if ($avx>2);
1694         cmp             \$512,$len
1695         jb              .Lskip_avx512
1696         and             %r11d,%r10d
1697         cmp             %r11d,%r10d             # check for AVX512F+BW+VL
1698         je              .Lblocks_avx512
1699 .Lskip_avx512:
1700 ___
1701 $code.=<<___    if (!$win64);
1702         lea             -8(%rsp),%r11
1703 .cfi_def_cfa            %r11,16
1704         sub             \$0x128,%rsp
1705 ___
1706 $code.=<<___    if ($win64);
1707         lea             -0xf8(%rsp),%r11
1708         sub             \$0x1c8,%rsp
1709         vmovdqa         %xmm6,0x50(%r11)
1710         vmovdqa         %xmm7,0x60(%r11)
1711         vmovdqa         %xmm8,0x70(%r11)
1712         vmovdqa         %xmm9,0x80(%r11)
1713         vmovdqa         %xmm10,0x90(%r11)
1714         vmovdqa         %xmm11,0xa0(%r11)
1715         vmovdqa         %xmm12,0xb0(%r11)
1716         vmovdqa         %xmm13,0xc0(%r11)
1717         vmovdqa         %xmm14,0xd0(%r11)
1718         vmovdqa         %xmm15,0xe0(%r11)
1719 .Ldo_avx2_body:
1720 ___
1721 $code.=<<___;
1722         lea             .Lconst(%rip),%rcx
1723         lea             48+64($ctx),$ctx        # size optimization
1724         vmovdqa         96(%rcx),$T0            # .Lpermd_avx2
1725
1726         # expand and copy pre-calculated table to stack
1727         vmovdqu         `16*0-64`($ctx),%x#$T2
1728         and             \$-512,%rsp
1729         vmovdqu         `16*1-64`($ctx),%x#$T3
1730         vmovdqu         `16*2-64`($ctx),%x#$T4
1731         vmovdqu         `16*3-64`($ctx),%x#$D0
1732         vmovdqu         `16*4-64`($ctx),%x#$D1
1733         vmovdqu         `16*5-64`($ctx),%x#$D2
1734         lea             0x90(%rsp),%rax         # size optimization
1735         vmovdqu         `16*6-64`($ctx),%x#$D3
1736         vpermd          $T2,$T0,$T2             # 00003412 -> 14243444
1737         vmovdqu         `16*7-64`($ctx),%x#$D4
1738         vpermd          $T3,$T0,$T3
1739         vmovdqu         `16*8-64`($ctx),%x#$MASK
1740         vpermd          $T4,$T0,$T4
1741         vmovdqa         $T2,0x00(%rsp)
1742         vpermd          $D0,$T0,$D0
1743         vmovdqa         $T3,0x20-0x90(%rax)
1744         vpermd          $D1,$T0,$D1
1745         vmovdqa         $T4,0x40-0x90(%rax)
1746         vpermd          $D2,$T0,$D2
1747         vmovdqa         $D0,0x60-0x90(%rax)
1748         vpermd          $D3,$T0,$D3
1749         vmovdqa         $D1,0x80-0x90(%rax)
1750         vpermd          $D4,$T0,$D4
1751         vmovdqa         $D2,0xa0-0x90(%rax)
1752         vpermd          $MASK,$T0,$MASK
1753         vmovdqa         $D3,0xc0-0x90(%rax)
1754         vmovdqa         $D4,0xe0-0x90(%rax)
1755         vmovdqa         $MASK,0x100-0x90(%rax)
1756         vmovdqa         64(%rcx),$MASK          # .Lmask26
1757
1758         ################################################################
1759         # load input
1760         vmovdqu         16*0($inp),%x#$T0
1761         vmovdqu         16*1($inp),%x#$T1
1762         vinserti128     \$1,16*2($inp),$T0,$T0
1763         vinserti128     \$1,16*3($inp),$T1,$T1
1764         lea             16*4($inp),$inp
1765
1766         vpsrldq         \$6,$T0,$T2             # splat input
1767         vpsrldq         \$6,$T1,$T3
1768         vpunpckhqdq     $T1,$T0,$T4             # 4
1769         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1770         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1771
1772         vpsrlq          \$30,$T2,$T3
1773         vpsrlq          \$4,$T2,$T2
1774         vpsrlq          \$26,$T0,$T1
1775         vpsrlq          \$40,$T4,$T4            # 4
1776         vpand           $MASK,$T2,$T2           # 2
1777         vpand           $MASK,$T0,$T0           # 0
1778         vpand           $MASK,$T1,$T1           # 1
1779         vpand           $MASK,$T3,$T3           # 3
1780         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1781
1782         vpaddq          $H2,$T2,$H2             # accumulate input
1783         sub             \$64,$len
1784         jz              .Ltail_avx2
1785         jmp             .Loop_avx2
1786
1787 .align  32
1788 .Loop_avx2:
1789         ################################################################
1790         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1791         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1792         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1793         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1794         #   \________/\__________/
1795         ################################################################
1796         #vpaddq         $H2,$T2,$H2             # accumulate input
1797         vpaddq          $H0,$T0,$H0
1798         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1799         vpaddq          $H1,$T1,$H1
1800         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1801         vpaddq          $H3,$T3,$H3
1802         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1803         vpaddq          $H4,$T4,$H4
1804         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1805         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1806
1807         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1808         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1809         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1810         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1811         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1812         #
1813         # however, as h2 is "chronologically" the first one available, pull the
1814         # corresponding operations up, so it's
1815         #
1816         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1817         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1818         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1819         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1820         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
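        #
        # a scalar C sketch of the per-lane arithmetic, assuming h0..h4 and
        # r0..r4 are uint64_t variables holding 26-bit limbs and
        # s1..s4 = 5*r1..5*r4 are precomputed (illustrative only; every
        # product and sum fits comfortably in 64 bits):
        #
        #       d0 = h0*r0 + h1*s4 + h2*s3 + h3*s2 + h4*s1;
        #       d1 = h0*r1 + h1*r0 + h2*s4 + h3*s3 + h4*s2;
        #       d2 = h0*r2 + h1*r1 + h2*r0 + h3*s4 + h4*s3;
        #       d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*s4;
        #       d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0;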
1821
1822         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1823         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1824         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1825         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1826         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1827
1828         vpmuludq        $H0,$T1,$T4             # h0*r1
1829         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1830         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1831         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1832         vpmuludq        $H3,$T1,$T4             # h3*r1
1833         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1834         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1835         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1836          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1837
1838         vpmuludq        $H0,$T0,$T4             # h0*r0
1839         vpmuludq        $H1,$T0,$H2             # h1*r0
1840         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1841         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1842         vpmuludq        $H3,$T0,$T4             # h3*r0
1843         vpmuludq        $H4,$T0,$H2             # h4*r0
1844          vmovdqu        16*0($inp),%x#$T0       # load input
1845         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1846         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1847          vinserti128    \$1,16*2($inp),$T0,$T0
1848
1849         vpmuludq        $H3,$T1,$T4             # h3*s2
1850         vpmuludq        $H4,$T1,$H2             # h4*s2
1851          vmovdqu        16*1($inp),%x#$T1
1852         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1853         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1854          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1855         vpmuludq        $H1,$T2,$T4             # h1*r2
1856         vpmuludq        $H0,$T2,$T2             # h0*r2
1857         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1858         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1859          vinserti128    \$1,16*3($inp),$T1,$T1
1860          lea            16*4($inp),$inp
1861
1862         vpmuludq        $H1,$H2,$T4             # h1*r3
1863         vpmuludq        $H0,$H2,$H2             # h0*r3
1864          vpsrldq        \$6,$T0,$T2             # splat input
1865         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1866         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1867         vpmuludq        $H3,$T3,$T4             # h3*s3
1868         vpmuludq        $H4,$T3,$H2             # h4*s3
1869          vpsrldq        \$6,$T1,$T3
1870         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1871         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1872          vpunpckhqdq    $T1,$T0,$T4             # 4
1873
1874         vpmuludq        $H3,$S4,$H3             # h3*s4
1875         vpmuludq        $H4,$S4,$H4             # h4*s4
1876          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1877         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1878         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1879          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1880         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1881         vpmuludq        $H1,$S4,$H0             # h1*s4
1882         vmovdqa         64(%rcx),$MASK          # .Lmask26
1883         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1884         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1885
1886         ################################################################
1887         # lazy reduction (interleaved with tail of input splat)
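        # the same chain written as scalar C for clarity (illustrative only;
        # M is the 26-bit mask 0x3ffffff, d0..d4 are the sums computed above,
        # and the vector code interleaves these steps with the input splat):
        #
        #       c  = d3 >> 26; h3 = d3 & M; h4 = d4 + c;        /* h3 -> h4 */
        #       c  = d0 >> 26; h0 = d0 & M; h1 = d1 + c;        /* h0 -> h1 */
        #       c4 = h4 >> 26; h4 &= M;
        #       c  = h1 >> 26; h1 &= M; h2 = d2 + c;            /* h1 -> h2 */
        #       h0 += c4 + (c4 << 2);                           /* h4 -> h0, i.e. carry*5 */
        #       c  = h2 >> 26; h2 &= M; h3 += c;                /* h2 -> h3 */
        #       c  = h0 >> 26; h0 &= M; h1 += c;                /* h0 -> h1 */
        #       c  = h3 >> 26; h3 &= M; h4 += c;                /* h3 -> h4 */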
1888
1889         vpsrlq          \$26,$H3,$D3
1890         vpand           $MASK,$H3,$H3
1891         vpaddq          $D3,$H4,$H4             # h3 -> h4
1892
1893         vpsrlq          \$26,$H0,$D0
1894         vpand           $MASK,$H0,$H0
1895         vpaddq          $D0,$D1,$H1             # h0 -> h1
1896
1897         vpsrlq          \$26,$H4,$D4
1898         vpand           $MASK,$H4,$H4
1899
1900          vpsrlq         \$4,$T3,$T2
1901
1902         vpsrlq          \$26,$H1,$D1
1903         vpand           $MASK,$H1,$H1
1904         vpaddq          $D1,$H2,$H2             # h1 -> h2
1905
1906         vpaddq          $D4,$H0,$H0
1907         vpsllq          \$2,$D4,$D4
1908         vpaddq          $D4,$H0,$H0             # h4 -> h0
1909
1910          vpand          $MASK,$T2,$T2           # 2
1911          vpsrlq         \$26,$T0,$T1
1912
1913         vpsrlq          \$26,$H2,$D2
1914         vpand           $MASK,$H2,$H2
1915         vpaddq          $D2,$H3,$H3             # h2 -> h3
1916
1917          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1918          vpsrlq         \$30,$T3,$T3
1919
1920         vpsrlq          \$26,$H0,$D0
1921         vpand           $MASK,$H0,$H0
1922         vpaddq          $D0,$H1,$H1             # h0 -> h1
1923
1924          vpsrlq         \$40,$T4,$T4            # 4
1925
1926         vpsrlq          \$26,$H3,$D3
1927         vpand           $MASK,$H3,$H3
1928         vpaddq          $D3,$H4,$H4             # h3 -> h4
1929
1930          vpand          $MASK,$T0,$T0           # 0
1931          vpand          $MASK,$T1,$T1           # 1
1932          vpand          $MASK,$T3,$T3           # 3
1933          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1934
1935         sub             \$64,$len
1936         jnz             .Loop_avx2
1937
1938         .byte           0x66,0x90
1939 .Ltail_avx2:
1940         ################################################################
1941         # while the above multiplications were by r^4 in all lanes, in the last
1942         # iteration we multiply the least significant lane by r^4 and the most
1943         # significant one by r, so this is a copy of the above except that
1944         # references to the precomputed table are displaced by 4...
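        #
        # in other words, for the last four blocks each lane contributes one
        # term of
        #
        #       lane0*r^4 + lane1*r^3 + lane2*r^2 + lane3*r     (mod 2^130-5)
        #
        # where lane j already holds its running partial sum plus the j-th of
        # those blocks, and the horizontal addition further below collapses
        # the four lanes into a single accumulator (descriptive note).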
1945
1946         #vpaddq         $H2,$T2,$H2             # accumulate input
1947         vpaddq          $H0,$T0,$H0
1948         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1949         vpaddq          $H1,$T1,$H1
1950         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1951         vpaddq          $H3,$T3,$H3
1952         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1953         vpaddq          $H4,$T4,$H4
1954         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1955         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1956
1957         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1958         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1959         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1960         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1961         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1962
1963         vpmuludq        $H0,$T1,$T4             # h0*r1
1964         vpmuludq        $H1,$T1,$H2             # h1*r1
1965         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1966         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1967         vpmuludq        $H3,$T1,$T4             # h3*r1
1968         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1969         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1970         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1971
1972         vpmuludq        $H0,$T0,$T4             # h0*r0
1973         vpmuludq        $H1,$T0,$H2             # h1*r0
1974         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1975          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1976         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1977         vpmuludq        $H3,$T0,$T4             # h3*r0
1978         vpmuludq        $H4,$T0,$H2             # h4*r0
1979         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1980         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1981
1982         vpmuludq        $H3,$T1,$T4             # h3*s2
1983         vpmuludq        $H4,$T1,$H2             # h4*s2
1984         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1985         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1986          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1987         vpmuludq        $H1,$T2,$T4             # h1*r2
1988         vpmuludq        $H0,$T2,$T2             # h0*r2
1989         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1990         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1991
1992         vpmuludq        $H1,$H2,$T4             # h1*r3
1993         vpmuludq        $H0,$H2,$H2             # h0*r3
1994         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1995         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1996         vpmuludq        $H3,$T3,$T4             # h3*s3
1997         vpmuludq        $H4,$T3,$H2             # h4*s3
1998         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1999         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
2000
2001         vpmuludq        $H3,$S4,$H3             # h3*s4
2002         vpmuludq        $H4,$S4,$H4             # h4*s4
2003         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
2004         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
2005         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
2006         vpmuludq        $H1,$S4,$H0             # h1*s4
2007         vmovdqa         64(%rcx),$MASK          # .Lmask26
2008         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
2009         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
2010
2011         ################################################################
2012         # horizontal addition
2013
2014         vpsrldq         \$8,$D1,$T1
2015         vpsrldq         \$8,$H2,$T2
2016         vpsrldq         \$8,$H3,$T3
2017         vpsrldq         \$8,$H4,$T4
2018         vpsrldq         \$8,$H0,$T0
2019         vpaddq          $T1,$D1,$D1
2020         vpaddq          $T2,$H2,$H2
2021         vpaddq          $T3,$H3,$H3
2022         vpaddq          $T4,$H4,$H4
2023         vpaddq          $T0,$H0,$H0
2024
2025         vpermq          \$0x2,$H3,$T3
2026         vpermq          \$0x2,$H4,$T4
2027         vpermq          \$0x2,$H0,$T0
2028         vpermq          \$0x2,$D1,$T1
2029         vpermq          \$0x2,$H2,$T2
2030         vpaddq          $T3,$H3,$H3
2031         vpaddq          $T4,$H4,$H4
2032         vpaddq          $T0,$H0,$H0
2033         vpaddq          $T1,$D1,$D1
2034         vpaddq          $T2,$H2,$H2
2035
2036         ################################################################
2037         # lazy reduction
2038
2039         vpsrlq          \$26,$H3,$D3
2040         vpand           $MASK,$H3,$H3
2041         vpaddq          $D3,$H4,$H4             # h3 -> h4
2042
2043         vpsrlq          \$26,$H0,$D0
2044         vpand           $MASK,$H0,$H0
2045         vpaddq          $D0,$D1,$H1             # h0 -> h1
2046
2047         vpsrlq          \$26,$H4,$D4
2048         vpand           $MASK,$H4,$H4
2049
2050         vpsrlq          \$26,$H1,$D1
2051         vpand           $MASK,$H1,$H1
2052         vpaddq          $D1,$H2,$H2             # h1 -> h2
2053
2054         vpaddq          $D4,$H0,$H0
2055         vpsllq          \$2,$D4,$D4
2056         vpaddq          $D4,$H0,$H0             # h4 -> h0
2057
2058         vpsrlq          \$26,$H2,$D2
2059         vpand           $MASK,$H2,$H2
2060         vpaddq          $D2,$H3,$H3             # h2 -> h3
2061
2062         vpsrlq          \$26,$H0,$D0
2063         vpand           $MASK,$H0,$H0
2064         vpaddq          $D0,$H1,$H1             # h0 -> h1
2065
2066         vpsrlq          \$26,$H3,$D3
2067         vpand           $MASK,$H3,$H3
2068         vpaddq          $D3,$H4,$H4             # h3 -> h4
2069
2070         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2071         vmovd           %x#$H1,`4*1-48-64`($ctx)
2072         vmovd           %x#$H2,`4*2-48-64`($ctx)
2073         vmovd           %x#$H3,`4*3-48-64`($ctx)
2074         vmovd           %x#$H4,`4*4-48-64`($ctx)
2075 ___
2076 $code.=<<___    if ($win64);
2077         vmovdqa         0x50(%r11),%xmm6
2078         vmovdqa         0x60(%r11),%xmm7
2079         vmovdqa         0x70(%r11),%xmm8
2080         vmovdqa         0x80(%r11),%xmm9
2081         vmovdqa         0x90(%r11),%xmm10
2082         vmovdqa         0xa0(%r11),%xmm11
2083         vmovdqa         0xb0(%r11),%xmm12
2084         vmovdqa         0xc0(%r11),%xmm13
2085         vmovdqa         0xd0(%r11),%xmm14
2086         vmovdqa         0xe0(%r11),%xmm15
2087         lea             0xf8(%r11),%rsp
2088 .Ldo_avx2_epilogue:
2089 ___
2090 $code.=<<___    if (!$win64);
2091         lea             8(%r11),%rsp
2092 .cfi_def_cfa            %rsp,8
2093 ___
2094 $code.=<<___;
2095         vzeroupper
2096         ret
2097 .cfi_endproc
2098 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
2099 ___
2100 #######################################################################
2101 if ($avx>2) {
2102 # On entry we have input length divisible by 64. But since the inner loop
2103 # processes 128 bytes per iteration, cases when the length is not divisible
2104 # by 128 are handled by passing the tail 64 bytes to .Ltail_avx2. For this
2105 # reason the stack layout is kept identical to poly1305_blocks_avx2. If not
2106 # for this tail, we wouldn't even have to allocate a stack frame...
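#
# Roughly (illustrative outline only): .Loop_avx512 consumes 128 bytes
# (8 blocks, powers r^8..r^1) per iteration, .Ltail_avx512 winds the eight
# lanes down, and a trailing 64-byte block, when present, is handed over to
# .Ltail_avx2 with the partially reduced hash still in registers -- hence the
# shared stack layout.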
2107
2108 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
2109 my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
2110 my $PADBIT="%zmm30";
2111 my $GATHER="%ymm31";
2112
2113 $code.=<<___;
2114 .type   poly1305_blocks_avx512,\@function,4
2115 .align  32
2116 poly1305_blocks_avx512:
2117 .cfi_startproc
2118 .Lblocks_avx512:
2119         vzeroupper
2120 ___
2121 $code.=<<___    if (!$win64);
2122         lea             -8(%rsp),%r11
2123 .cfi_def_cfa            %r11,16
2124         sub             \$0x128,%rsp
2125 ___
2126 $code.=<<___    if ($win64);
2127         lea             -0xf8(%rsp),%r11
2128         sub             \$0x1c8,%rsp
2129         vmovdqa         %xmm6,0x50(%r11)
2130         vmovdqa         %xmm7,0x60(%r11)
2131         vmovdqa         %xmm8,0x70(%r11)
2132         vmovdqa         %xmm9,0x80(%r11)
2133         vmovdqa         %xmm10,0x90(%r11)
2134         vmovdqa         %xmm11,0xa0(%r11)
2135         vmovdqa         %xmm12,0xb0(%r11)
2136         vmovdqa         %xmm13,0xc0(%r11)
2137         vmovdqa         %xmm14,0xd0(%r11)
2138         vmovdqa         %xmm15,0xe0(%r11)
2139 .Ldo_avx512_body:
2140 ___
2141 $code.=<<___;
2142         lea             .Lconst(%rip),%rcx
2143         lea             48+64($ctx),$ctx        # size optimization
2144         vmovdqa         96(%rcx),$T2            # .Lpermd_avx2
2145
2146         # expand pre-calculated table
2147         vmovdqu32       `16*0-64`($ctx),%x#$R0
2148         and             \$-512,%rsp
2149         vmovdqu32       `16*1-64`($ctx),%x#$R1
2150         vmovdqu32       `16*2-64`($ctx),%x#$S1
2151         vmovdqu32       `16*3-64`($ctx),%x#$R2
2152         vmovdqu32       `16*4-64`($ctx),%x#$S2
2153         vmovdqu32       `16*5-64`($ctx),%x#$R3
2154         vmovdqu32       `16*6-64`($ctx),%x#$S3
2155         vmovdqu32       `16*7-64`($ctx),%x#$R4
2156         vmovdqu32       `16*8-64`($ctx),%x#$S4
2157         vpermd          $R0,$T2,$R0             # 00003412 -> 14243444
2158         vmovdqa64       64(%rcx),$MASK          # .Lmask26
2159         vpermd          $R1,$T2,$R1
2160         vpermd          $S1,$T2,$S1
2161         vpermd          $R2,$T2,$R2
2162         vmovdqa32       $R0,0x00(%rsp)          # save in case $len%128 != 0
2163          vpsrlq         \$32,$R0,$T0            # 14243444 -> 01020304
2164         vpermd          $S2,$T2,$S2
2165         vmovdqa32       $R1,0x20(%rsp)
2166          vpsrlq         \$32,$R1,$T1
2167         vpermd          $R3,$T2,$R3
2168         vmovdqa32       $S1,0x40(%rsp)
2169         vpermd          $S3,$T2,$S3
2170         vpermd          $R4,$T2,$R4
2171         vmovdqa32       $R2,0x60(%rsp)
2172         vpermd          $S4,$T2,$S4
2173         vmovdqa32       $S2,0x80(%rsp)
2174         vmovdqa32       $R3,0xa0(%rsp)
2175         vmovdqa32       $S3,0xc0(%rsp)
2176         vmovdqa32       $R4,0xe0(%rsp)
2177         vmovdqa32       $S4,0x100(%rsp)
2178
2179         ################################################################
2180         # calculate 5th through 8th powers of the key
2181         #
2182         # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2183         # d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2184         # d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
2185         # d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
2186         # d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0
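        #
        # these are the same limb-level formulas as for h*r in .Loop_avx2,
        # only with the already computed powers of r (r0'..r4' above) in
        # place of h; multiplying r^k by r^4 yields r^(k+4), i.e. the 5th
        # through 8th powers needed by the 8-block loop (descriptive note).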
2187
2188         vpmuludq        $T0,$R0,$D0             # d0 = r0'*r0
2189         vpmuludq        $T0,$R1,$D1             # d1 = r0'*r1
2190         vpmuludq        $T0,$R2,$D2             # d2 = r0'*r2
2191         vpmuludq        $T0,$R3,$D3             # d3 = r0'*r3
2192         vpmuludq        $T0,$R4,$D4             # d4 = r0'*r4
2193          vpsrlq         \$32,$R2,$T2
2194
2195         vpmuludq        $T1,$S4,$M0
2196         vpmuludq        $T1,$R0,$M1
2197         vpmuludq        $T1,$R1,$M2
2198         vpmuludq        $T1,$R2,$M3
2199         vpmuludq        $T1,$R3,$M4
2200          vpsrlq         \$32,$R3,$T3
2201         vpaddq          $M0,$D0,$D0             # d0 += r1'*5*r4
2202         vpaddq          $M1,$D1,$D1             # d1 += r1'*r0
2203         vpaddq          $M2,$D2,$D2             # d2 += r1'*r1
2204         vpaddq          $M3,$D3,$D3             # d3 += r1'*r2
2205         vpaddq          $M4,$D4,$D4             # d4 += r1'*r3
2206
2207         vpmuludq        $T2,$S3,$M0
2208         vpmuludq        $T2,$S4,$M1
2209         vpmuludq        $T2,$R1,$M3
2210         vpmuludq        $T2,$R2,$M4
2211         vpmuludq        $T2,$R0,$M2
2212          vpsrlq         \$32,$R4,$T4
2213         vpaddq          $M0,$D0,$D0             # d0 += r2'*5*r3
2214         vpaddq          $M1,$D1,$D1             # d1 += r2'*5*r4
2215         vpaddq          $M3,$D3,$D3             # d3 += r2'*r1
2216         vpaddq          $M4,$D4,$D4             # d4 += r2'*r2
2217         vpaddq          $M2,$D2,$D2             # d2 += r2'*r0
2218
2219         vpmuludq        $T3,$S2,$M0
2220         vpmuludq        $T3,$R0,$M3
2221         vpmuludq        $T3,$R1,$M4
2222         vpmuludq        $T3,$S3,$M1
2223         vpmuludq        $T3,$S4,$M2
2224         vpaddq          $M0,$D0,$D0             # d0 += r3'*5*r2
2225         vpaddq          $M3,$D3,$D3             # d3 += r3'*r0
2226         vpaddq          $M4,$D4,$D4             # d4 += r3'*r1
2227         vpaddq          $M1,$D1,$D1             # d1 += r3'*5*r3
2228         vpaddq          $M2,$D2,$D2             # d2 += r3'*5*r4
2229
2230         vpmuludq        $T4,$S4,$M3
2231         vpmuludq        $T4,$R0,$M4
2232         vpmuludq        $T4,$S1,$M0
2233         vpmuludq        $T4,$S2,$M1
2234         vpmuludq        $T4,$S3,$M2
2235         vpaddq          $M3,$D3,$D3             # d3 += r4'*5*r4
2236         vpaddq          $M4,$D4,$D4             # d4 += r4'*r0
2237         vpaddq          $M0,$D0,$D0             # d0 += r4'*5*r1
2238         vpaddq          $M1,$D1,$D1             # d1 += r4'*5*r2
2239         vpaddq          $M2,$D2,$D2             # d2 += r4'*5*r3
2240
2241         ################################################################
2242         # load input
2243         vmovdqu64       16*0($inp),%z#$T3
2244         vmovdqu64       16*4($inp),%z#$T4
2245         lea             16*8($inp),$inp
2246
2247         ################################################################
2248         # lazy reduction
2249
2250         vpsrlq          \$26,$D3,$M3
2251         vpandq          $MASK,$D3,$D3
2252         vpaddq          $M3,$D4,$D4             # d3 -> d4
2253
2254         vpsrlq          \$26,$D0,$M0
2255         vpandq          $MASK,$D0,$D0
2256         vpaddq          $M0,$D1,$D1             # d0 -> d1
2257
2258         vpsrlq          \$26,$D4,$M4
2259         vpandq          $MASK,$D4,$D4
2260
2261         vpsrlq          \$26,$D1,$M1
2262         vpandq          $MASK,$D1,$D1
2263         vpaddq          $M1,$D2,$D2             # d1 -> d2
2264
2265         vpaddq          $M4,$D0,$D0
2266         vpsllq          \$2,$M4,$M4
2267         vpaddq          $M4,$D0,$D0             # d4 -> d0
2268
2269         vpsrlq          \$26,$D2,$M2
2270         vpandq          $MASK,$D2,$D2
2271         vpaddq          $M2,$D3,$D3             # d2 -> d3
2272
2273         vpsrlq          \$26,$D0,$M0
2274         vpandq          $MASK,$D0,$D0
2275         vpaddq          $M0,$D1,$D1             # d0 -> d1
2276
2277         vpsrlq          \$26,$D3,$M3
2278         vpandq          $MASK,$D3,$D3
2279         vpaddq          $M3,$D4,$D4             # d3 -> d4
2280
2281 ___
2282 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));            # switch to %zmm domain
2283 map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
2284 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2285 map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
2286 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2287 map(s/%y/%z/,($MASK));
2288 $code.=<<___;
2289         ################################################################
2290         # at this point we have 14243444 in $R0-$S4 and 05060708 in
2291         # $D0-$D4, ...
2292
2293         vpunpcklqdq     $T4,$T3,$T0     # transpose input
2294         vpunpckhqdq     $T4,$T3,$T4
2295
2296         # ... since input 64-bit lanes are ordered as 73625140, we could
2297         # "vperm" it to 76543210 (here and in each loop iteration), *or*
2298         # we could just flow along, hence the goal for $R0-$S4 is
2299         # 1858286838784888 ...
2300
2301         mov             \$0b0110011001100110,%eax
2302         mov             \$0b1100110011001100,%r8d
2303         mov             \$0b0101010101010101,%r9d
2304         kmovw           %eax,%k1
2305         kmovw           %r8d,%k2
2306         kmovw           %r9d,%k3
2307
2308         vpbroadcastq    %x#$D0,$M0      # 0808080808080808
2309         vpbroadcastq    %x#$D1,$M1
2310         vpbroadcastq    %x#$D2,$M2
2311         vpbroadcastq    %x#$D3,$M3
2312         vpbroadcastq    %x#$D4,$M4
2313
2314         vpexpandd       $D0,${D0}{%k1}  # 05060708 -> -05--06--07--08-
2315         vpexpandd       $D1,${D1}{%k1}
2316         vpexpandd       $D2,${D2}{%k1}
2317         vpexpandd       $D3,${D3}{%k1}
2318         vpexpandd       $D4,${D4}{%k1}
2319
2320         vpexpandd       $R0,${D0}{%k2}  # -05--06--07--08- -> 145-246-347-448-
2321         vpexpandd       $R1,${D1}{%k2}
2322         vpexpandd       $R2,${D2}{%k2}
2323         vpexpandd       $R3,${D3}{%k2}
2324         vpexpandd       $R4,${D4}{%k2}
2325
2326         vpblendmd       $M0,$D0,${R0}{%k3}      # 1858286838784888
2327         vpblendmd       $M1,$D1,${R1}{%k3}
2328         vpblendmd       $M2,$D2,${R2}{%k3}
2329         vpblendmd       $M3,$D3,${R3}{%k3}
2330         vpblendmd       $M4,$D4,${R4}{%k3}
2331
2332         vpslld          \$2,$R1,$S1             # *5
2333         vpslld          \$2,$R2,$S2
2334         vpslld          \$2,$R3,$S3
2335         vpslld          \$2,$R4,$S4
2336         vpaddd          $R1,$S1,$S1
2337         vpaddd          $R2,$S2,$S2
2338         vpaddd          $R3,$S3,$S3
2339         vpaddd          $R4,$S4,$S4
2340
2341         vpbroadcastq    %x#$MASK,$MASK
2342         vpbroadcastq    32(%rcx),$PADBIT        # .L129
2343
2344         vpsrlq          \$52,$T0,$T2            # splat input
2345         vpsllq          \$12,$T4,$T3
2346         vporq           $T3,$T2,$T2
2347         vpsrlq          \$26,$T0,$T1
2348         vpsrlq          \$14,$T4,$T3
2349         vpsrlq          \$40,$T4,$T4            # 4
2350         vpandq          $MASK,$T2,$T2           # 2
2351         vpandq          $MASK,$T0,$T0           # 0
2352         vpandq          $MASK,$T1,$T1           # 1
2353         vpandq          $MASK,$T3,$T3           # 3
2354         #vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2355
2356         vpaddq          $H2,$T2,$H2             # accumulate input
2357         mov             \$0x0f,%eax
2358         sub             \$192,$len
2359         jbe             .Ltail_avx512
2360         jmp             .Loop_avx512
2361
2362 .align  32
2363 .Loop_avx512:
2364         ################################################################
2365         # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2366         # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2367         # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2368         # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2369         # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2370         # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2371         # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2372         # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2373         #   \________/\___________/
2374         ################################################################
2375         #vpaddq         $H2,$T2,$H2             # accumulate input
2376
2377         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
2378         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
2379         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
2380         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
2381         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2382         #
2383         # however, as h2 is "chronologically" the first one available, pull the
2384         # corresponding operations up, so it's
2385         #
2386         # d3 = h2*r1   + h0*r3 + h1*r2   + h3*r0 + h4*5*r4
2387         # d4 = h2*r2   + h0*r4 + h1*r3   + h3*r1 + h4*r0
2388         # d0 = h2*5*r3 + h0*r0 + h1*5*r4         + h3*5*r2 + h4*5*r1
2389         # d1 = h2*5*r4 + h0*r1           + h1*r0 + h3*5*r3 + h4*5*r2
2390         # d2 = h2*r0           + h0*r2   + h1*r1 + h3*5*r4 + h4*5*r3
2391
2392         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2393          vpaddq         $H0,$T0,$H0
2394         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2395         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2396         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2397          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2398         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2399          vpaddq         $H1,$T1,$H1             # accumulate input
2400          vpaddq         $H3,$T3,$H3
2401          vpaddq         $H4,$T4,$H4
2402
2403           vmovdqu64     16*0($inp),$T3          # load input
2404           vmovdqu64     16*4($inp),$T4
2405           lea           16*8($inp),$inp
2406         vpmuludq        $H0,$R3,$M3
2407         vpmuludq        $H0,$R4,$M4
2408         vpmuludq        $H0,$R0,$M0
2409         vpmuludq        $H0,$R1,$M1
2410         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2411         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2412         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2413         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2414
2415         vpmuludq        $H1,$R2,$M3
2416         vpmuludq        $H1,$R3,$M4
2417         vpmuludq        $H1,$S4,$M0
2418         vpmuludq        $H0,$R2,$M2
2419         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2420         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2421         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2422         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2423
2424           vpunpcklqdq   $T4,$T3,$T0             # transpose input
2425           vpunpckhqdq   $T4,$T3,$T4
2426
2427         vpmuludq        $H3,$R0,$M3
2428         vpmuludq        $H3,$R1,$M4
2429         vpmuludq        $H1,$R0,$M1
2430         vpmuludq        $H1,$R1,$M2
2431         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2432         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2433         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2434         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2435
2436         vpmuludq        $H4,$S4,$M3
2437         vpmuludq        $H4,$R0,$M4
2438         vpmuludq        $H3,$S2,$M0
2439         vpmuludq        $H3,$S3,$M1
2440         vpaddq          $M3,$D3,$D3             # d3 += h4*s4
2441         vpmuludq        $H3,$S4,$M2
2442         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2443         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2444         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2445         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2446
2447         vpmuludq        $H4,$S1,$M0
2448         vpmuludq        $H4,$S2,$M1
2449         vpmuludq        $H4,$S3,$M2
2450         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2451         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2452         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2453
2454         ################################################################
2455         # lazy reduction (interleaved with input splat)
2456
2457          vpsrlq         \$52,$T0,$T2            # splat input
2458          vpsllq         \$12,$T4,$T3
2459
2460         vpsrlq          \$26,$D3,$H3
2461         vpandq          $MASK,$D3,$D3
2462         vpaddq          $H3,$D4,$H4             # h3 -> h4
2463
2464          vporq          $T3,$T2,$T2
2465
2466         vpsrlq          \$26,$H0,$D0
2467         vpandq          $MASK,$H0,$H0
2468         vpaddq          $D0,$H1,$H1             # h0 -> h1
2469
2470          vpandq         $MASK,$T2,$T2           # 2
2471
2472         vpsrlq          \$26,$H4,$D4
2473         vpandq          $MASK,$H4,$H4
2474
2475         vpsrlq          \$26,$H1,$D1
2476         vpandq          $MASK,$H1,$H1
2477         vpaddq          $D1,$H2,$H2             # h1 -> h2
2478
2479         vpaddq          $D4,$H0,$H0
2480         vpsllq          \$2,$D4,$D4
2481         vpaddq          $D4,$H0,$H0             # h4 -> h0
2482
2483          vpaddq         $T2,$H2,$H2             # modulo-scheduled
2484          vpsrlq         \$26,$T0,$T1
2485
2486         vpsrlq          \$26,$H2,$D2
2487         vpandq          $MASK,$H2,$H2
2488         vpaddq          $D2,$D3,$H3             # h2 -> h3
2489
2490          vpsrlq         \$14,$T4,$T3
2491
2492         vpsrlq          \$26,$H0,$D0
2493         vpandq          $MASK,$H0,$H0
2494         vpaddq          $D0,$H1,$H1             # h0 -> h1
2495
2496          vpsrlq         \$40,$T4,$T4            # 4
2497
2498         vpsrlq          \$26,$H3,$D3
2499         vpandq          $MASK,$H3,$H3
2500         vpaddq          $D3,$H4,$H4             # h3 -> h4
2501
2502          vpandq         $MASK,$T0,$T0           # 0
2503          vpandq         $MASK,$T1,$T1           # 1
2504          vpandq         $MASK,$T3,$T3           # 3
2505          #vporq         $PADBIT,$T4,$T4         # padbit, yes, always
2506
2507         sub             \$128,$len
2508         ja              .Loop_avx512
2509
2510 .Ltail_avx512:
2511         ################################################################
2512         # while the above multiplications were by r^8 in all lanes, in the last
2513         # iteration we multiply the least significant lane by r^8 and the most
2514         # significant one by r, which is why the table gets shifted...
2515
2516         vpsrlq          \$32,$R0,$R0            # 0105020603070408
2517         vpsrlq          \$32,$R1,$R1
2518         vpsrlq          \$32,$R2,$R2
2519         vpsrlq          \$32,$S3,$S3
2520         vpsrlq          \$32,$S4,$S4
2521         vpsrlq          \$32,$R3,$R3
2522         vpsrlq          \$32,$R4,$R4
2523         vpsrlq          \$32,$S1,$S1
2524         vpsrlq          \$32,$S2,$S2
2525
2526         ################################################################
2527         # load either the next or the last 64 bytes of input
2528         lea             ($inp,$len),$inp
2529
2530         #vpaddq         $H2,$T2,$H2             # accumulate input
2531         vpaddq          $H0,$T0,$H0
2532
2533         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2534         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2535         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2536         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2537         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2538          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2539          vpaddq         $H1,$T1,$H1             # accumulate input
2540          vpaddq         $H3,$T3,$H3
2541          vpaddq         $H4,$T4,$H4
2542
2543           vmovdqu64     16*0($inp),%x#$T0
2544         vpmuludq        $H0,$R3,$M3
2545         vpmuludq        $H0,$R4,$M4
2546         vpmuludq        $H0,$R0,$M0
2547         vpmuludq        $H0,$R1,$M1
2548         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2549         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2550         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2551         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2552
2553           vmovdqu64     16*1($inp),%x#$T1
2554         vpmuludq        $H1,$R2,$M3
2555         vpmuludq        $H1,$R3,$M4
2556         vpmuludq        $H1,$S4,$M0
2557         vpmuludq        $H0,$R2,$M2
2558         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2559         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2560         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2561         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2562
2563           vinserti64x2  \$1,16*2($inp),$T0,$T0
2564         vpmuludq        $H3,$R0,$M3
2565         vpmuludq        $H3,$R1,$M4
2566         vpmuludq        $H1,$R0,$M1
2567         vpmuludq        $H1,$R1,$M2
2568         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2569         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2570         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2571         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2572
2573           vinserti64x2  \$1,16*3($inp),$T1,$T1
2574         vpmuludq        $H4,$S4,$M3
2575         vpmuludq        $H4,$R0,$M4
2576         vpmuludq        $H3,$S2,$M0
2577         vpmuludq        $H3,$S3,$M1
2578         vpmuludq        $H3,$S4,$M2
2579         vpaddq          $M3,$D3,$H3             # h3 = d3 + h4*s4
2580         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2581         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2582         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2583         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2584
2585         vpmuludq        $H4,$S1,$M0
2586         vpmuludq        $H4,$S2,$M1
2587         vpmuludq        $H4,$S3,$M2
2588         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2589         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2590         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2591
2592         ################################################################
2593         # horizontal addition
2594
2595         mov             \$1,%eax
2596         vpsrldq         \$8,$H3,$D3
2597         vpsrldq         \$8,$D4,$H4
2598         vpsrldq         \$8,$H0,$D0
2599         vpsrldq         \$8,$H1,$D1
2600         vpsrldq         \$8,$H2,$D2
2601         vpaddq          $D3,$H3,$H3
2602         vpaddq          $D4,$H4,$H4
2603         vpaddq          $D0,$H0,$H0
2604         vpaddq          $D1,$H1,$H1
2605         vpaddq          $D2,$H2,$H2
2606
2607         kmovw           %eax,%k3
2608         vpermq          \$0x2,$H3,$D3
2609         vpermq          \$0x2,$H4,$D4
2610         vpermq          \$0x2,$H0,$D0
2611         vpermq          \$0x2,$H1,$D1
2612         vpermq          \$0x2,$H2,$D2
2613         vpaddq          $D3,$H3,$H3
2614         vpaddq          $D4,$H4,$H4
2615         vpaddq          $D0,$H0,$H0
2616         vpaddq          $D1,$H1,$H1
2617         vpaddq          $D2,$H2,$H2
2618
2619         vextracti64x4   \$0x1,$H3,%y#$D3
2620         vextracti64x4   \$0x1,$H4,%y#$D4
2621         vextracti64x4   \$0x1,$H0,%y#$D0
2622         vextracti64x4   \$0x1,$H1,%y#$D1
2623         vextracti64x4   \$0x1,$H2,%y#$D2
2624         vpaddq          $D3,$H3,${H3}{%k3}{z}   # keep single qword in case
2625         vpaddq          $D4,$H4,${H4}{%k3}{z}   # it's passed to .Ltail_avx2
2626         vpaddq          $D0,$H0,${H0}{%k3}{z}
2627         vpaddq          $D1,$H1,${H1}{%k3}{z}
2628         vpaddq          $D2,$H2,${H2}{%k3}{z}
2629 ___
2630 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2631 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2632 $code.=<<___;
2633         ################################################################
2634         # lazy reduction (interleaved with input splat)
2635
2636         vpsrlq          \$26,$H3,$D3
2637         vpandq          $MASK,$H3,$H3
2638          vpsrldq        \$6,$T0,$T2             # splat input
2639          vpsrldq        \$6,$T1,$T3
2640          vpunpckhqdq    $T1,$T0,$T4             # 4
2641         vpaddq          $D3,$H4,$H4             # h3 -> h4
2642
2643         vpsrlq          \$26,$H0,$D0
2644         vpandq          $MASK,$H0,$H0
2645          vpunpcklqdq    $T3,$T2,$T2             # 2:3
2646          vpunpcklqdq    $T1,$T0,$T0             # 0:1
2647         vpaddq          $D0,$H1,$H1             # h0 -> h1
2648
2649         vpsrlq          \$26,$H4,$D4
2650         vpandq          $MASK,$H4,$H4
2651
2652         vpsrlq          \$26,$H1,$D1
2653         vpandq          $MASK,$H1,$H1
2654          vpsrlq         \$30,$T2,$T3
2655          vpsrlq         \$4,$T2,$T2
2656         vpaddq          $D1,$H2,$H2             # h1 -> h2
2657
2658         vpaddq          $D4,$H0,$H0
2659         vpsllq          \$2,$D4,$D4
2660          vpsrlq         \$26,$T0,$T1
2661          vpsrlq         \$40,$T4,$T4            # 4
2662         vpaddq          $D4,$H0,$H0             # h4 -> h0
2663
2664         vpsrlq          \$26,$H2,$D2
2665         vpandq          $MASK,$H2,$H2
2666          vpandq         $MASK,$T2,$T2           # 2
2667          vpandq         $MASK,$T0,$T0           # 0
2668         vpaddq          $D2,$H3,$H3             # h2 -> h3
2669
2670         vpsrlq          \$26,$H0,$D0
2671         vpandq          $MASK,$H0,$H0
2672          vpaddq         $H2,$T2,$H2             # accumulate input for .Ltail_avx2
2673          vpandq         $MASK,$T1,$T1           # 1
2674         vpaddq          $D0,$H1,$H1             # h0 -> h1
2675
2676         vpsrlq          \$26,$H3,$D3
2677         vpandq          $MASK,$H3,$H3
2678          vpandq         $MASK,$T3,$T3           # 3
2679          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2680         vpaddq          $D3,$H4,$H4             # h3 -> h4
2681
2682         lea             0x90(%rsp),%rax         # size optimization for .Ltail_avx2
2683         add             \$64,$len
2684         jnz             .Ltail_avx2
2685
2686         vpsubq          $T2,$H2,$H2             # undo input accumulation
2687         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2688         vmovd           %x#$H1,`4*1-48-64`($ctx)
2689         vmovd           %x#$H2,`4*2-48-64`($ctx)
2690         vmovd           %x#$H3,`4*3-48-64`($ctx)
2691         vmovd           %x#$H4,`4*4-48-64`($ctx)
2692         vzeroall
2693 ___
2694 $code.=<<___    if ($win64);
2695         movdqa          0x50(%r11),%xmm6
2696         movdqa          0x60(%r11),%xmm7
2697         movdqa          0x70(%r11),%xmm8
2698         movdqa          0x80(%r11),%xmm9
2699         movdqa          0x90(%r11),%xmm10
2700         movdqa          0xa0(%r11),%xmm11
2701         movdqa          0xb0(%r11),%xmm12
2702         movdqa          0xc0(%r11),%xmm13
2703         movdqa          0xd0(%r11),%xmm14
2704         movdqa          0xe0(%r11),%xmm15
2705         lea             0xf8(%r11),%rsp
2706 .Ldo_avx512_epilogue:
2707 ___
2708 $code.=<<___    if (!$win64);
2709         lea             8(%r11),%rsp
2710 .cfi_def_cfa            %rsp,8
2711 ___
2712 $code.=<<___;
2713         ret
2714 .cfi_endproc
2715 .size   poly1305_blocks_avx512,.-poly1305_blocks_avx512
2716 ___
2717 if ($avx>3) {
2718 ########################################################################
2719 # VPMADD52 version using 2^44 radix.
2720 #
2721 # One can argue that base 2^52 would be more natural. Well, even though
2722 # some operations would be more natural, one has to recognize a couple of
2723 # things. Base 2^52 doesn't provide an advantage over base 2^44 if you look
2724 # at the amount of multiply-and-accumulate operations. Secondly, it makes it
2725 # impossible to pre-compute multiples of 5 [referred to as s[]/sN in
2726 # reference implementations], which means that more such operations
2727 # would have to be performed in the inner loop, which in turn makes the
2728 # critical path longer. In other words, even though base 2^44 reduction
2729 # might look less elegant, the overall critical path is actually shorter...
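#
# For orientation, a scalar C sketch of one base 2^44 step matching the lane
# layout used below (illustrative only; names are not from this module). The
# accumulator is kept as limbs of 44, 44 and 42 bits, and the folded terms
# use s1 = 20*r1, s2 = 20*r2: the usual *5 from 2^130 = 5 (mod p) picks up
# an extra *4 because the cross terms carry weight 2^132, not 2^130. The
# vector code below splits each product into its low and high 52 bits with
# vpmadd52luq/vpmadd52huq instead of using 128-bit temporaries.
#
#       unsigned __int128 d0, d1, d2;
#       d0 = (unsigned __int128)h0*r0 + (unsigned __int128)h1*s2 + (unsigned __int128)h2*s1;
#       d1 = (unsigned __int128)h0*r1 + (unsigned __int128)h1*r0 + (unsigned __int128)h2*s2;
#       d2 = (unsigned __int128)h0*r2 + (unsigned __int128)h1*r1 + (unsigned __int128)h2*r0;
#       /* carry: 44-bit masks on d0 and d1, 42-bit mask on d2, carry out of
#          d2 folded back into d0 multiplied by 5 */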
2730
2731 $code.=<<___;
2732 .type   poly1305_init_base2_44,\@function,3
2733 .align  32
2734 poly1305_init_base2_44:
2735         xor     %rax,%rax
2736         mov     %rax,0($ctx)            # initialize hash value
2737         mov     %rax,8($ctx)
2738         mov     %rax,16($ctx)
2739
2740 .Linit_base2_44:
2741         lea     poly1305_blocks_vpmadd52(%rip),%r10
2742         lea     poly1305_emit_base2_44(%rip),%r11
2743
2744         mov     \$0x0ffffffc0fffffff,%rax
2745         mov     \$0x0ffffffc0ffffffc,%rcx
2746         and     0($inp),%rax
2747         mov     \$0x00000fffffffffff,%r8
2748         and     8($inp),%rcx
2749         mov     \$0x00000fffffffffff,%r9
2750         and     %rax,%r8
2751         shrd    \$44,%rcx,%rax
2752         mov     %r8,40($ctx)            # r0
2753         and     %r9,%rax
2754         shr     \$24,%rcx
2755         mov     %rax,48($ctx)           # r1
2756         lea     (%rax,%rax,4),%rax      # *5
2757         mov     %rcx,56($ctx)           # r2
2758         shl     \$2,%rax                # magic <<2
2759         lea     (%rcx,%rcx,4),%rcx      # *5
2760         shl     \$2,%rcx                # magic <<2
2761         mov     %rax,24($ctx)           # s1
2762         mov     %rcx,32($ctx)           # s2
2763 ___
2764 $code.=<<___    if ($flavour !~ /elf32/);
2765         mov     %r10,0(%rdx)
2766         mov     %r11,8(%rdx)
2767 ___
2768 $code.=<<___    if ($flavour =~ /elf32/);
2769         mov     %r10d,0(%rdx)
2770         mov     %r11d,4(%rdx)
2771 ___
2772 $code.=<<___;
2773         mov     \$1,%eax
2774         ret
2775 .size   poly1305_init_base2_44,.-poly1305_init_base2_44
2776 ___
2777 {
2778 my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
2779 my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
2780 my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));
2781
2782 $code.=<<___;
2783 .type   poly1305_blocks_vpmadd52,\@function,4
2784 .align  32
2785 poly1305_blocks_vpmadd52:
2786         shr     \$4,$len
2787         jz      .Lno_data_vpmadd52              # too short
2788
2789         mov             \$7,%r10d
2790         mov             \$1,%r11d
2791         kmovw           %r10d,%k7
2792         lea             .L2_44_inp_permd(%rip),%r10
2793         shl             \$40,$padbit
2794         kmovw           %r11d,%k1
2795
2796         vmovq           $padbit,%x#$PAD
2797         vmovdqa64       0(%r10),$inp_permd      # .L2_44_inp_permd
2798         vmovdqa64       32(%r10),$inp_shift     # .L2_44_inp_shift
2799         vpermq          \$0xcf,$PAD,$PAD
2800         vmovdqa64       64(%r10),$reduc_mask    # .L2_44_mask
2801
2802         vmovdqu64       0($ctx),${Dlo}{%k7}{z}          # load hash value
2803         vmovdqu64       40($ctx),${r2r1r0}{%k7}{z}      # load keys
2804         vmovdqu64       32($ctx),${r1r0s2}{%k7}{z}
2805         vmovdqu64       24($ctx),${r0s2s1}{%k7}{z}
2806
2807         vmovdqa64       96(%r10),$reduc_rght    # .L2_44_shift_rgt
2808         vmovdqa64       128(%r10),$reduc_left   # .L2_44_shift_lft
2809
2810         jmp             .Loop_vpmadd52
2811
2812 .align  32
2813 .Loop_vpmadd52:
2814         vmovdqu32       0($inp),%x#$T0          # load input as ----3210
2815         lea             16($inp),$inp
2816
2817         vpermd          $T0,$inp_permd,$T0      # ----3210 -> --322110
2818         vpsrlvq         $inp_shift,$T0,$T0
2819         vpandq          $reduc_mask,$T0,$T0
2820         vporq           $PAD,$T0,$T0
2821
2822         vpaddq          $T0,$Dlo,$Dlo           # accumulate input
2823
2824         vpermq          \$0,$Dlo,${H0}{%k7}{z}  # smash hash value
2825         vpermq          \$0b01010101,$Dlo,${H1}{%k7}{z}
2826         vpermq          \$0b10101010,$Dlo,${H2}{%k7}{z}
2827
2828         vpxord          $Dlo,$Dlo,$Dlo
2829         vpxord          $Dhi,$Dhi,$Dhi
2830
2831         vpmadd52luq     $r2r1r0,$H0,$Dlo
2832         vpmadd52huq     $r2r1r0,$H0,$Dhi
2833
2834         vpmadd52luq     $r1r0s2,$H1,$Dlo
2835         vpmadd52huq     $r1r0s2,$H1,$Dhi
2836
2837         vpmadd52luq     $r0s2s1,$H2,$Dlo
2838         vpmadd52huq     $r0s2s1,$H2,$Dhi
2839
2840         vpsrlvq         $reduc_rght,$Dlo,$T0    # 0 in topmost qword
2841         vpsllvq         $reduc_left,$Dhi,$Dhi   # 0 in topmost qword
2842         vpandq          $reduc_mask,$Dlo,$Dlo
2843
2844         vpaddq          $T0,$Dhi,$Dhi
2845
2846         vpermq          \$0b10010011,$Dhi,$Dhi  # 0 in lowest qword
2847
2848         vpaddq          $Dhi,$Dlo,$Dlo          # note topmost qword :-)
2849
2850         vpsrlvq         $reduc_rght,$Dlo,$T0    # 0 in topmost qword
2851         vpandq          $reduc_mask,$Dlo,$Dlo
2852
2853         vpermq          \$0b10010011,$T0,$T0
2854
2855         vpaddq          $T0,$Dlo,$Dlo
2856
2857         vpermq          \$0b10010011,$Dlo,${T0}{%k1}{z}
2858
2859         vpaddq          $T0,$Dlo,$Dlo
2860         vpsllq          \$2,$T0,$T0
2861
2862         vpaddq          $T0,$Dlo,$Dlo
2863
2864         dec             $len                    # len-=16
2865         jnz             .Loop_vpmadd52
2866
2867         vmovdqu64       $Dlo,0($ctx){%k7}       # store hash value
2868
2869 .Lno_data_vpmadd52:
2870         ret
2871 .size   poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
2872 ___
2873 }
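# A scalar reference model (illustrative only, not used by the generator;
# relies on the core Math::BigInt module for the >64-bit intermediate
# products) of what one iteration of .Loop_vpmadd52 computes. The three dot
# products are exactly what the vpmadd52luq/vpmadd52huq pairs accumulate,
# merely split into the low and high 52-bit halves of each product
# ($Dlo/$Dhi), and the carry chain corresponds to the .L2_44_shift_rgt/
# .L2_44_shift_lft passes. The vector code may leave the limbs in a slightly
# redundant form, but the represented value of h modulo 2^130-5 is the same.
sub _vpmadd52_block_sketch {
	use Math::BigInt;
	my ($h0,$h1,$h2, $r0,$r1,$r2,$s1,$s2, $m0,$m1,$m2) =
	    map { Math::BigInt->new($_) } @_;
	my $mask44 = (Math::BigInt->new(1)<<44) - 1;
	my $mask42 = (Math::BigInt->new(1)<<42) - 1;

	$h0 += $m0;  $h1 += $m1;  $h2 += $m2;	# accumulate input (padbit is in $m2)

	my $d0 = $h0*$r0 + $h1*$s2 + $h2*$s1;	# s1=20*r1, s2=20*r2 fold the
	my $d1 = $h0*$r1 + $h1*$r0 + $h2*$s2;	# wrapped-around cross products
	my $d2 = $h0*$r2 + $h1*$r1 + $h2*$r0;	# back below 2^130

	my $c;
	$c = $d0>>44;  $h0 = $d0 & $mask44;	# 44/44/42-bit carry chain
	$d1 += $c;  $c = $d1>>44;  $h1 = $d1 & $mask44;
	$d2 += $c;  $c = $d2>>42;  $h2 = $d2 & $mask42;
	$h0 += $c*5;				# 2^130 = 5 (mod 2^130-5)

	return ($h0, $h1, $h2);
}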
2874 $code.=<<___;
2875 .type   poly1305_emit_base2_44,\@function,3
2876 .align  32
2877 poly1305_emit_base2_44:
2878         mov     0($ctx),%r8     # load hash value
2879         mov     8($ctx),%r9
2880         mov     16($ctx),%r10
2881
2882         mov     %r9,%rax
2883         shr     \$20,%r9
2884         shl     \$44,%rax
2885         mov     %r10,%rcx
2886         shr     \$40,%r10
2887         shl     \$24,%rcx
2888
2889         add     %rax,%r8
2890         adc     %rcx,%r9
2891         adc     \$0,%r10
2892
2893         mov     %r8,%rax
2894         add     \$5,%r8         # compare to modulus
2895         mov     %r9,%rcx
2896         adc     \$0,%r9
2897         adc     \$0,%r10
2898         shr     \$2,%r10        # did 130-bit value overflow?
2899         cmovnz  %r8,%rax
2900         cmovnz  %r9,%rcx
2901
2902         add     0($nonce),%rax  # accumulate nonce
2903         adc     8($nonce),%rcx
2904         mov     %rax,0($mac)    # write result
2905         mov     %rcx,8($mac)
2906
2907         ret
2908 .size   poly1305_emit_base2_44,.-poly1305_emit_base2_44
2909 ___
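# A scalar sketch (illustrative only; relies on Math::BigInt) of the
# conversion performed by poly1305_emit_base2_44 above: recombine the
# 44/44/42-bit limbs, apply the single conditional subtraction of 2^130-5,
# then add the nonce and keep the low 128 bits. $n0/$n1 stand for the two
# little-endian 64-bit words at $nonce.
sub _emit_base2_44_sketch {
	use Math::BigInt;
	my ($h0,$h1,$h2,$n0,$n1) = map { Math::BigInt->new($_) } @_;
	my $h = $h0 + ($h1<<44) + ($h2<<88);	# recombine limbs
	my $t = $h + 5;				# compare to modulus by adding 5
	$h = $t - (Math::BigInt->new(1)<<130) if $t>>130;  # i.e. h -= 2^130-5
	$h += $n0 + ($n1<<64);			# accumulate nonce
	return $h & ((Math::BigInt->new(1)<<128) - 1);	# tag = low 128 bits
}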
2910 }       }       }
2911 $code.=<<___;
2912 .align  64
2913 .Lconst:
2914 .Lmask24:
2915 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
2916 .L129:
2917 .long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
2918 .Lmask26:
2919 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
2920 .Lpermd_avx2:
2921 .long   2,2,2,3,2,0,2,1
2922
2923 .L2_44_inp_permd:
2924 .long   0,1,1,2,2,3,7,7
2925 .L2_44_inp_shift:
2926 .quad   0,12,24,64
2927 .L2_44_mask:
2928 .quad   0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
2929 .L2_44_shift_rgt:
2930 .quad   44,44,42,64
2931 .L2_44_shift_lft:
2932 .quad   8,8,10,64
2933 ___
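# The .L2_44_inp_permd/.L2_44_inp_shift/.L2_44_mask tables above implement,
# for every 16-byte block, the limb split sketched below (scalar,
# illustrative only; assumes a perl with 64-bit integers). Note that the
# padbit lands at bit 128 of the block, i.e. bit 40 of the third limb,
# which is what the vpermq 0xcf placement of $PAD achieves.
sub _inp_base2_44_sketch {
	my ($lo, $hi, $padbit) = @_;	# block as two little-endian 64-bit words
	my $mask44 = 0xfffffffffff;
	my $m0 = $lo & $mask44;					# bits  0..43
	my $m1 = (($lo>>44) | (($hi & 0xffffff)<<20)) & $mask44;# bits 44..87
	my $m2 = ($hi>>24) | ($padbit<<40);		# bits 88..128, incl. padbit
	return ($m0, $m1, $m2);
}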
2934 }
2935
2936 $code.=<<___;
2937 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2938 .align  16
2939 ___
2940
2941 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2942 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
2943 if ($win64) {
2944 $rec="%rcx";
2945 $frame="%rdx";
2946 $context="%r8";
2947 $disp="%r9";
2948
2949 $code.=<<___;
2950 .extern __imp_RtlVirtualUnwind
2951 .type   se_handler,\@abi-omnipotent
2952 .align  16
2953 se_handler:
2954         push    %rsi
2955         push    %rdi
2956         push    %rbx
2957         push    %rbp
2958         push    %r12
2959         push    %r13
2960         push    %r14
2961         push    %r15
2962         pushfq
2963         sub     \$64,%rsp
2964
2965         mov     120($context),%rax      # pull context->Rax
2966         mov     248($context),%rbx      # pull context->Rip
2967
2968         mov     8($disp),%rsi           # disp->ImageBase
2969         mov     56($disp),%r11          # disp->HandlerData
2970
2971         mov     0(%r11),%r10d           # HandlerData[0]
2972         lea     (%rsi,%r10),%r10        # prologue label
2973         cmp     %r10,%rbx               # context->Rip<.Lprologue
2974         jb      .Lcommon_seh_tail
2975
2976         mov     152($context),%rax      # pull context->Rsp
2977
2978         mov     4(%r11),%r10d           # HandlerData[1]
2979         lea     (%rsi,%r10),%r10        # epilogue label
2980         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
2981         jae     .Lcommon_seh_tail
2982
2983         lea     48(%rax),%rax
2984
2985         mov     -8(%rax),%rbx
2986         mov     -16(%rax),%rbp
2987         mov     -24(%rax),%r12
2988         mov     -32(%rax),%r13
2989         mov     -40(%rax),%r14
2990         mov     -48(%rax),%r15
2991         mov     %rbx,144($context)      # restore context->Rbx
2992         mov     %rbp,160($context)      # restore context->Rbp
2993         mov     %r12,216($context)      # restore context->R12
2994         mov     %r13,224($context)      # restore context->R13
2995         mov     %r14,232($context)      # restore context->R14
2996         mov     %r15,240($context)      # restore context->R15
2997
2998         jmp     .Lcommon_seh_tail
2999 .size   se_handler,.-se_handler
3000
3001 .type   avx_handler,\@abi-omnipotent
3002 .align  16
3003 avx_handler:
3004         push    %rsi
3005         push    %rdi
3006         push    %rbx
3007         push    %rbp
3008         push    %r12
3009         push    %r13
3010         push    %r14
3011         push    %r15
3012         pushfq
3013         sub     \$64,%rsp
3014
3015         mov     120($context),%rax      # pull context->Rax
3016         mov     248($context),%rbx      # pull context->Rip
3017
3018         mov     8($disp),%rsi           # disp->ImageBase
3019         mov     56($disp),%r11          # disp->HandlerData
3020
3021         mov     0(%r11),%r10d           # HandlerData[0]
3022         lea     (%rsi,%r10),%r10        # prologue label
3023         cmp     %r10,%rbx               # context->Rip<prologue label
3024         jb      .Lcommon_seh_tail
3025
3026         mov     152($context),%rax      # pull context->Rsp
3027
3028         mov     4(%r11),%r10d           # HandlerData[1]
3029         lea     (%rsi,%r10),%r10        # epilogue label
3030         cmp     %r10,%rbx               # context->Rip>=epilogue label
3031         jae     .Lcommon_seh_tail
3032
3033         mov     208($context),%rax      # pull context->R11
3034
3035         lea     0x50(%rax),%rsi
3036         lea     0xf8(%rax),%rax
3037         lea     512($context),%rdi      # &context.Xmm6
3038         mov     \$20,%ecx
3039         .long   0xa548f3fc              # cld; rep movsq
3040
3041 .Lcommon_seh_tail:
3042         mov     8(%rax),%rdi
3043         mov     16(%rax),%rsi
3044         mov     %rax,152($context)      # restore context->Rsp
3045         mov     %rsi,168($context)      # restore context->Rsi
3046         mov     %rdi,176($context)      # restore context->Rdi
3047
3048         mov     40($disp),%rdi          # disp->ContextRecord
3049         mov     $context,%rsi           # context
3050         mov     \$154,%ecx              # sizeof(CONTEXT) in qwords
3051         .long   0xa548f3fc              # cld; rep movsq
3052
3053         mov     $disp,%rsi
3054         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
3055         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
3056         mov     0(%rsi),%r8             # arg3, disp->ControlPc
3057         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
3058         mov     40(%rsi),%r10           # disp->ContextRecord
3059         lea     56(%rsi),%r11           # &disp->HandlerData
3060         lea     24(%rsi),%r12           # &disp->EstablisherFrame
3061         mov     %r10,32(%rsp)           # arg5
3062         mov     %r11,40(%rsp)           # arg6
3063         mov     %r12,48(%rsp)           # arg7
3064         mov     %rcx,56(%rsp)           # arg8, (NULL)
3065         call    *__imp_RtlVirtualUnwind(%rip)
3066
3067         mov     \$1,%eax                # ExceptionContinueSearch
3068         add     \$64,%rsp
3069         popfq
3070         pop     %r15
3071         pop     %r14
3072         pop     %r13
3073         pop     %r12
3074         pop     %rbp
3075         pop     %rbx
3076         pop     %rdi
3077         pop     %rsi
3078         ret
3079 .size   avx_handler,.-avx_handler
3080
3081 .section        .pdata
3082 .align  4
3083         .rva    .LSEH_begin_poly1305_init
3084         .rva    .LSEH_end_poly1305_init
3085         .rva    .LSEH_info_poly1305_init
3086
3087         .rva    .LSEH_begin_poly1305_blocks
3088         .rva    .LSEH_end_poly1305_blocks
3089         .rva    .LSEH_info_poly1305_blocks
3090
3091         .rva    .LSEH_begin_poly1305_emit
3092         .rva    .LSEH_end_poly1305_emit
3093         .rva    .LSEH_info_poly1305_emit
3094 ___
3095 $code.=<<___ if ($avx);
3096         .rva    .LSEH_begin_poly1305_blocks_avx
3097         .rva    .Lbase2_64_avx
3098         .rva    .LSEH_info_poly1305_blocks_avx_1
3099