x86_64 assembly pack: "optimize" for Knights Landing, add AVX-512 results.
crypto/poly1305/asm/poly1305-x86_64.pl
#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for x86_64.
#
# March 2015
#
# Initial release.
#
# December 2016
#
# Add AVX512F+VL+BW code path.
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#               IALU/gcc-4.8(*) AVX(**)         AVX2    AVX-512
# P4            4.46/+120%      -
# Core 2        2.41/+90%       -
# Westmere      1.88/+120%      -
# Sandy Bridge  1.39/+140%      1.10
# Haswell       1.14/+175%      1.11            0.65
# Skylake[-X]   1.13/+120%      0.96            0.51    [0.35]
# Silvermont    2.83/+95%       -
# Knights L     3.60/-          1.65            1.10    (***)
# Goldmont      1.70/+180%      -
# VIA Nano      1.82/+150%      -
# Sledgehammer  1.38/+160%      -
# Bulldozer     2.30/+130%      0.97
# Ryzen         1.15/+200%      1.08            1.18
#
# (*)   improvement coefficients relative to clang are more modest,
#       ~50% on most processors; in both cases the comparison is to
#       __int128 code;
# (**)  an SSE2 implementation was attempted, but among non-AVX
#       processors it was faster than integer-only code only on older
#       Intel P4 and Core processors, by 30-50% (the newer the
#       processor, the smaller the gain), and slower on contemporary
#       ones, for example almost 2x slower on Atom; as the former are
#       naturally disappearing, SSE2 is deemed unnecessary;
# (***) the current AVX-512 code requires the BW and VL extensions and
#       cannot execute on Knights Landing;

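# For orientation, what the routines below compute, in one formula:
# with p = 2^130-5, a clamped key half r and 16-byte blocks m[i] (each
# extended with a pad bit at 2^128), h is iterated as
# h = (h + m[i])*r mod p and the tag is (h + s) mod 2^128.  A minimal
# big-number sketch of that in Perl, kept commented out because it is
# illustrative only and not used by the generator (poly1305_ref is a
# hypothetical helper name, aligned-length input assumed):
#
#   use Math::BigInt;
#   sub poly1305_ref {
#       my ($key, $msg) = @_;           # 32-byte key, 16*n-byte message
#       my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
#       my $r = Math::BigInt->from_bytes(scalar reverse substr($key,0,16));
#       $r->band(Math::BigInt->from_hex("0ffffffc0ffffffc0ffffffc0fffffff"));
#       my $s = Math::BigInt->from_bytes(scalar reverse substr($key,16,16));
#       my $h = Math::BigInt->bzero();
#       for (my $i = 0; $i < length($msg); $i += 16) {
#           my $m = Math::BigInt->from_bytes(scalar reverse substr($msg,$i,16));
#           $m->badd(Math::BigInt->new(2)->bpow(128));      # pad bit
#           $h->badd($m)->bmul($r)->bmod($p);               # (h+m)*r mod p
#       }
#       $h->badd($s)->bmod(Math::BigInt->new(2)->bpow(128));
#       my $tag = $h->to_bytes();                           # big-endian
#       return scalar reverse(("\0" x (16-length($tag))).$tag);
#   }
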
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

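# The probes below derive a capability level in $avx from the assembler
# (or compiler) version: 1 enables the AVX code path, 2 adds AVX2, and
# higher values gate the AVX-512 paths (this reading of the thresholds
# is inferred from how $avx is tested later in the module).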
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
                =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
        $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
           `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
        $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12);
        $avx += 2 if ($1==2.11 && $2>=8);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
           `ml64 2>&1` =~ /Version ([0-9]+)\./) {
        $avx = ($1>=10) + ($1>=12);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
        $avx = ($2>=3.0) + ($2>3.0);
}

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);   # *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");

sub poly1305_iteration {
# input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:       $h0-$h2 *= $r0-$r1
$code.=<<___;
        mulq    $h0                     # h0*r1
        mov     %rax,$d2
         mov    $r0,%rax
        mov     %rdx,$d3

        mulq    $h0                     # h0*r0
        mov     %rax,$h0                # future $h0
         mov    $r0,%rax
        mov     %rdx,$d1

        mulq    $h1                     # h1*r0
        add     %rax,$d2
         mov    $s1,%rax
        adc     %rdx,$d3

        mulq    $h1                     # h1*s1
         mov    $h2,$h1                 # borrow $h1
        add     %rax,$h0
        adc     %rdx,$d1

        imulq   $s1,$h1                 # h2*s1
        add     $h1,$d2
         mov    $d1,$h1
        adc     \$0,$d3

        imulq   $r0,$h2                 # h2*r0
        add     $d2,$h1
        mov     \$-4,%rax               # mask value
        adc     $h2,$d3

        and     $d3,%rax                # last reduction step
        mov     $d3,$h2
        shr     \$2,$d3
        and     \$3,$h2
        add     $d3,%rax
        add     %rax,$h0
        adc     \$0,$h1
        adc     \$0,$h2
___
}
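
# The "last reduction step" above exploits 2^130 == 5 (mod 2^130-5):
# the product bits at and above 2^130 sit in $d3 above bit 1, so with
# c = $d3 >> 2 the code folds back 5*c, computed without a multiply as
# ($d3 & ~3) + ($d3 >> 2) = 4*c + c, keeping only $d3's low two bits
# in $h2.  A scalar sketch of the same step (illustrative only):
#
#       c   = d3 >> 2;
#       h2  = d3 & 3;
#       h0 += (d3 & ~3) + c;            # == 5*c; carries ripple into h1, h2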

########################################################################
# The layout of the opaque area is as follows:
#
#       unsigned __int64 h[3];          # current hash value base 2^64
#       unsigned __int64 r[2];          # key value base 2^64

$code.=<<___;
.text

.extern OPENSSL_ia32cap_P

.globl  poly1305_init
.hidden poly1305_init
.globl  poly1305_blocks
.hidden poly1305_blocks
.globl  poly1305_emit
.hidden poly1305_emit

.type   poly1305_init,\@function,3
.align  32
poly1305_init:
        xor     %rax,%rax
        mov     %rax,0($ctx)            # initialize hash value
        mov     %rax,8($ctx)
        mov     %rax,16($ctx)

        cmp     \$0,$inp
        je      .Lno_key

        lea     poly1305_blocks(%rip),%r10
        lea     poly1305_emit(%rip),%r11
___
$code.=<<___    if ($avx);
        mov     OPENSSL_ia32cap_P+4(%rip),%r9
        lea     poly1305_blocks_avx(%rip),%rax
        lea     poly1305_emit_avx(%rip),%rcx
        bt      \$`60-32`,%r9           # AVX?
        cmovc   %rax,%r10
        cmovc   %rcx,%r11
___
$code.=<<___    if ($avx>1);
        lea     poly1305_blocks_avx2(%rip),%rax
        bt      \$`5+32`,%r9            # AVX2?
        cmovc   %rax,%r10
___
$code.=<<___    if ($avx>3);
        mov     \$`(1<<31|1<<21|1<<16)`,%rax
        shr     \$32,%r9
        and     %rax,%r9
        cmp     %rax,%r9
        je      .Linit_base2_44
___
$code.=<<___;
        mov     \$0x0ffffffc0fffffff,%rax
        mov     \$0x0ffffffc0ffffffc,%rcx
        and     0($inp),%rax
        and     8($inp),%rcx
        mov     %rax,24($ctx)
        mov     %rcx,32($ctx)
___
$code.=<<___    if ($flavour !~ /elf32/);
        mov     %r10,0(%rdx)
        mov     %r11,8(%rdx)
___
$code.=<<___    if ($flavour =~ /elf32/);
        mov     %r10d,0(%rdx)
        mov     %r11d,4(%rdx)
___
$code.=<<___;
        mov     \$1,%eax
.Lno_key:
        ret
.size   poly1305_init,.-poly1305_init

.type   poly1305_blocks,\@function,4
.align  32
poly1305_blocks:
.cfi_startproc
.Lblocks:
        shr     \$4,$len
        jz      .Lno_data               # too short

        push    %rbx
.cfi_push       %rbx
        push    %rbp
.cfi_push       %rbp
        push    %r12
.cfi_push       %r12
        push    %r13
.cfi_push       %r13
        push    %r14
.cfi_push       %r14
        push    %r15
.cfi_push       %r15
.Lblocks_body:

        mov     $len,%r15               # reassign $len

        mov     24($ctx),$r0            # load r
        mov     32($ctx),$s1

        mov     0($ctx),$h0             # load hash value
        mov     8($ctx),$h1
        mov     16($ctx),$h2

        mov     $s1,$r1
        shr     \$2,$s1
        mov     $r1,%rax
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
        jmp     .Loop

.align  32
.Loop:
        add     0($inp),$h0             # accumulate input
        adc     8($inp),$h1
        lea     16($inp),$inp
        adc     $padbit,$h2
___
        &poly1305_iteration();
$code.=<<___;
        mov     $r1,%rax
        dec     %r15                    # len-=16
        jnz     .Loop

        mov     $h0,0($ctx)             # store hash value
        mov     $h1,8($ctx)
        mov     $h2,16($ctx)

        mov     0(%rsp),%r15
.cfi_restore    %r15
        mov     8(%rsp),%r14
.cfi_restore    %r14
        mov     16(%rsp),%r13
.cfi_restore    %r13
        mov     24(%rsp),%r12
.cfi_restore    %r12
        mov     32(%rsp),%rbp
.cfi_restore    %rbp
        mov     40(%rsp),%rbx
.cfi_restore    %rbx
        lea     48(%rsp),%rsp
.cfi_adjust_cfa_offset  -48
.Lno_data:
.Lblocks_epilogue:
        ret
.cfi_endproc
.size   poly1305_blocks,.-poly1305_blocks

.type   poly1305_emit,\@function,3
.align  32
poly1305_emit:
.Lemit:
        mov     0($ctx),%r8     # load hash value
        mov     8($ctx),%r9
        mov     16($ctx),%r10

        mov     %r8,%rax
        add     \$5,%r8         # compare to modulus
        mov     %r9,%rcx
        adc     \$0,%r9
        adc     \$0,%r10
        shr     \$2,%r10        # did 130-bit value overflow?
        cmovnz  %r8,%rax
        cmovnz  %r9,%rcx

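        # Why this selects the canonical residue: on entry h is at most
        # slightly above p = 2^130-5 (only partially reduced), so h+5
        # carries into bit 130 exactly when h >= p, and in that case the
        # low 128 bits of h+5 equal h-p mod 2^128; the shr/cmovnz pair
        # above implements that selection branchlessly.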
        add     0($nonce),%rax  # accumulate nonce
        adc     8($nonce),%rcx
        mov     %rax,0($mac)    # write result
        mov     %rcx,8($mac)

        ret
.size   poly1305_emit,.-poly1305_emit
___
if ($avx) {

########################################################################
# The layout of the opaque area is as follows:
#
#       unsigned __int32 h[5];          # current hash value base 2^26
#       unsigned __int32 is_base2_26;
#       unsigned __int64 r[2];          # key value base 2^64
#       unsigned __int64 pad;
#       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are base 2^26 digits of degrees of the multiplier key.
# There are 5 digits, but the last four are interleaved with multiples
# of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.

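# Concretely (a sketch of the addressing used by __poly1305_init_avx
# below): each of the 9 elements occupies a 16-byte vector whose four
# 32-bit lanes hold the same digit of r^2, r^1, r^4 and r^3
# respectively, so digit k of r^2 is stored at offset 16*k+0, its r^1
# counterpart 4 bytes in, r^4 at +8 and r^3 at +12 (all relative to the
# biased $ctx used during initialization).
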
my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

$code.=<<___;
.type   __poly1305_block,\@abi-omnipotent
.align  32
__poly1305_block:
___
        &poly1305_iteration();
$code.=<<___;
        ret
.size   __poly1305_block,.-__poly1305_block

.type   __poly1305_init_avx,\@abi-omnipotent
.align  32
__poly1305_init_avx:
        mov     $r0,$h0
        mov     $r1,$h1
        xor     $h2,$h2

        lea     48+64($ctx),$ctx        # size optimization

        mov     $r1,%rax
        call    __poly1305_block        # r^2

        mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
        mov     \$0x3ffffff,%edx
        mov     $h0,$d1
        and     $h0#d,%eax
        mov     $r0,$d2
        and     $r0#d,%edx
        mov     %eax,`16*0+0-64`($ctx)
        shr     \$26,$d1
        mov     %edx,`16*0+4-64`($ctx)
        shr     \$26,$d2

        mov     \$0x3ffffff,%eax
        mov     \$0x3ffffff,%edx
        and     $d1#d,%eax
        and     $d2#d,%edx
        mov     %eax,`16*1+0-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %edx,`16*1+4-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %eax,`16*2+0-64`($ctx)
        shr     \$26,$d1
        mov     %edx,`16*2+4-64`($ctx)
        shr     \$26,$d2

        mov     $h1,%rax
        mov     $r1,%rdx
        shl     \$12,%rax
        shl     \$12,%rdx
        or      $d1,%rax
        or      $d2,%rdx
        and     \$0x3ffffff,%eax
        and     \$0x3ffffff,%edx
        mov     %eax,`16*3+0-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %edx,`16*3+4-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %eax,`16*4+0-64`($ctx)
        mov     $h1,$d1
        mov     %edx,`16*4+4-64`($ctx)
        mov     $r1,$d2

        mov     \$0x3ffffff,%eax
        mov     \$0x3ffffff,%edx
        shr     \$14,$d1
        shr     \$14,$d2
        and     $d1#d,%eax
        and     $d2#d,%edx
        mov     %eax,`16*5+0-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %edx,`16*5+4-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %eax,`16*6+0-64`($ctx)
        shr     \$26,$d1
        mov     %edx,`16*6+4-64`($ctx)
        shr     \$26,$d2

        mov     $h2,%rax
        shl     \$24,%rax
        or      %rax,$d1
        mov     $d1#d,`16*7+0-64`($ctx)
        lea     ($d1,$d1,4),$d1         # *5
        mov     $d2#d,`16*7+4-64`($ctx)
        lea     ($d2,$d2,4),$d2         # *5
        mov     $d1#d,`16*8+0-64`($ctx)
        mov     $d2#d,`16*8+4-64`($ctx)

        mov     $r1,%rax
        call    __poly1305_block        # r^3

        mov     \$0x3ffffff,%eax        # save r^3 base 2^26
        mov     $h0,$d1
        and     $h0#d,%eax
        shr     \$26,$d1
        mov     %eax,`16*0+12-64`($ctx)

        mov     \$0x3ffffff,%edx
        and     $d1#d,%edx
        mov     %edx,`16*1+12-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        shr     \$26,$d1
        mov     %edx,`16*2+12-64`($ctx)

        mov     $h1,%rax
        shl     \$12,%rax
        or      $d1,%rax
        and     \$0x3ffffff,%eax
        mov     %eax,`16*3+12-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     $h1,$d1
        mov     %eax,`16*4+12-64`($ctx)

        mov     \$0x3ffffff,%edx
        shr     \$14,$d1
        and     $d1#d,%edx
        mov     %edx,`16*5+12-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        shr     \$26,$d1
        mov     %edx,`16*6+12-64`($ctx)

        mov     $h2,%rax
        shl     \$24,%rax
        or      %rax,$d1
        mov     $d1#d,`16*7+12-64`($ctx)
        lea     ($d1,$d1,4),$d1         # *5
        mov     $d1#d,`16*8+12-64`($ctx)

        mov     $r1,%rax
        call    __poly1305_block        # r^4

        mov     \$0x3ffffff,%eax        # save r^4 base 2^26
        mov     $h0,$d1
        and     $h0#d,%eax
        shr     \$26,$d1
        mov     %eax,`16*0+8-64`($ctx)

        mov     \$0x3ffffff,%edx
        and     $d1#d,%edx
        mov     %edx,`16*1+8-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        shr     \$26,$d1
        mov     %edx,`16*2+8-64`($ctx)

        mov     $h1,%rax
        shl     \$12,%rax
        or      $d1,%rax
        and     \$0x3ffffff,%eax
        mov     %eax,`16*3+8-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     $h1,$d1
        mov     %eax,`16*4+8-64`($ctx)

        mov     \$0x3ffffff,%edx
        shr     \$14,$d1
        and     $d1#d,%edx
        mov     %edx,`16*5+8-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        shr     \$26,$d1
        mov     %edx,`16*6+8-64`($ctx)

        mov     $h2,%rax
        shl     \$24,%rax
        or      %rax,$d1
        mov     $d1#d,`16*7+8-64`($ctx)
        lea     ($d1,$d1,4),$d1         # *5
        mov     $d1#d,`16*8+8-64`($ctx)

        lea     -48-64($ctx),$ctx       # size [de-]optimization
        ret
.size   __poly1305_init_avx,.-__poly1305_init_avx

.type   poly1305_blocks_avx,\@function,4
.align  32
poly1305_blocks_avx:
.cfi_startproc
        mov     20($ctx),%r8d           # is_base2_26
        cmp     \$128,$len
        jae     .Lblocks_avx
        test    %r8d,%r8d
        jz      .Lblocks

.Lblocks_avx:
        and     \$-16,$len
        jz      .Lno_data_avx

        vzeroupper

        test    %r8d,%r8d
        jz      .Lbase2_64_avx

        test    \$31,$len
        jz      .Leven_avx

        push    %rbx
.cfi_push       %rbx
        push    %rbp
.cfi_push       %rbp
        push    %r12
.cfi_push       %r12
        push    %r13
.cfi_push       %r13
        push    %r14
.cfi_push       %r14
        push    %r15
.cfi_push       %r15
.Lblocks_avx_body:

        mov     $len,%r15               # reassign $len

        mov     0($ctx),$d1             # load hash value
        mov     8($ctx),$d2
        mov     16($ctx),$h2#d

        mov     24($ctx),$r0            # load r
        mov     32($ctx),$s1

        ################################# base 2^26 -> base 2^64
        mov     $d1#d,$h0#d
        and     \$`-1*(1<<31)`,$d1
        mov     $d2,$r1                 # borrow $r1
        mov     $d2#d,$h1#d
        and     \$`-1*(1<<31)`,$d2

        shr     \$6,$d1
        shl     \$52,$r1
        add     $d1,$h0
        shr     \$12,$h1
        shr     \$18,$d2
        add     $r1,$h0
        adc     $d2,$h1

        mov     $h2,$d1
        shl     \$40,$d1
        shr     \$24,$h2
        add     $d1,$h1
        adc     \$0,$h2                 # can be partially reduced...

        mov     \$-4,$d2                # ... so reduce
        mov     $h2,$d1
        and     $h2,$d2
        shr     \$2,$d1
        and     \$3,$h2
        add     $d2,$d1                 # =*5
        add     $d1,$h0
        adc     \$0,$h1
        adc     \$0,$h2

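        # What the conversion above computes, as an equation: the stored
        # 26-bit digits h[0..4] represent
        #
        #       h = h[0] + h[1]*2^26 + h[2]*2^52 + h[3]*2^78 + h[4]*2^104
        #
        # reassembled into the three 64-bit limbs h0, h1, h2, followed
        # by the same 5*c folding step as in __poly1305_block, since the
        # top digit may hold spill-over from lazy reduction.
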
        mov     $s1,$r1
        mov     $s1,%rax
        shr     \$2,$s1
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)

        add     0($inp),$h0             # accumulate input
        adc     8($inp),$h1
        lea     16($inp),$inp
        adc     $padbit,$h2

        call    __poly1305_block

        test    $padbit,$padbit         # if $padbit is zero,
        jz      .Lstore_base2_64_avx    # store hash in base 2^64 format

        ################################# base 2^64 -> base 2^26
        mov     $h0,%rax
        mov     $h0,%rdx
        shr     \$52,$h0
        mov     $h1,$r0
        mov     $h1,$r1
        shr     \$26,%rdx
        and     \$0x3ffffff,%rax        # h[0]
        shl     \$12,$r0
        and     \$0x3ffffff,%rdx        # h[1]
        shr     \$14,$h1
        or      $r0,$h0
        shl     \$24,$h2
        and     \$0x3ffffff,$h0         # h[2]
        shr     \$40,$r1
        and     \$0x3ffffff,$h1         # h[3]
        or      $r1,$h2                 # h[4]

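        # i.e. the inverse split, h[i] = (h >> 26*i) & 0x3ffffff:
        # digits 2 and 4 straddle the 64-bit limb boundaries, hence
        # the paired shl 12 / shr 52 and shl 24 / shr 40 stitching.
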
        sub     \$16,%r15
        jz      .Lstore_base2_26_avx

        vmovd   %rax#d,$H0
        vmovd   %rdx#d,$H1
        vmovd   $h0#d,$H2
        vmovd   $h1#d,$H3
        vmovd   $h2#d,$H4
        jmp     .Lproceed_avx

.align  32
.Lstore_base2_64_avx:
        mov     $h0,0($ctx)
        mov     $h1,8($ctx)
        mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
        jmp     .Ldone_avx

.align  16
.Lstore_base2_26_avx:
        mov     %rax#d,0($ctx)          # store hash value base 2^26
        mov     %rdx#d,4($ctx)
        mov     $h0#d,8($ctx)
        mov     $h1#d,12($ctx)
        mov     $h2#d,16($ctx)
.align  16
.Ldone_avx:
        mov     0(%rsp),%r15
.cfi_restore    %r15
        mov     8(%rsp),%r14
.cfi_restore    %r14
        mov     16(%rsp),%r13
.cfi_restore    %r13
        mov     24(%rsp),%r12
.cfi_restore    %r12
        mov     32(%rsp),%rbp
.cfi_restore    %rbp
        mov     40(%rsp),%rbx
.cfi_restore    %rbx
        lea     48(%rsp),%rsp
.cfi_adjust_cfa_offset  -48
.Lno_data_avx:
.Lblocks_avx_epilogue:
        ret
.cfi_endproc

.align  32
.Lbase2_64_avx:
.cfi_startproc
        push    %rbx
.cfi_push       %rbx
        push    %rbp
.cfi_push       %rbp
        push    %r12
.cfi_push       %r12
        push    %r13
.cfi_push       %r13
        push    %r14
.cfi_push       %r14
        push    %r15
.cfi_push       %r15
.Lbase2_64_avx_body:

        mov     $len,%r15               # reassign $len

        mov     24($ctx),$r0            # load r
        mov     32($ctx),$s1

        mov     0($ctx),$h0             # load hash value
        mov     8($ctx),$h1
        mov     16($ctx),$h2#d

        mov     $s1,$r1
        mov     $s1,%rax
        shr     \$2,$s1
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)

        test    \$31,$len
        jz      .Linit_avx

        add     0($inp),$h0             # accumulate input
        adc     8($inp),$h1
        lea     16($inp),$inp
        adc     $padbit,$h2
        sub     \$16,%r15

        call    __poly1305_block

.Linit_avx:
        ################################# base 2^64 -> base 2^26
        mov     $h0,%rax
        mov     $h0,%rdx
        shr     \$52,$h0
        mov     $h1,$d1
        mov     $h1,$d2
        shr     \$26,%rdx
        and     \$0x3ffffff,%rax        # h[0]
        shl     \$12,$d1
        and     \$0x3ffffff,%rdx        # h[1]
        shr     \$14,$h1
        or      $d1,$h0
        shl     \$24,$h2
        and     \$0x3ffffff,$h0         # h[2]
        shr     \$40,$d2
        and     \$0x3ffffff,$h1         # h[3]
        or      $d2,$h2                 # h[4]

        vmovd   %rax#d,$H0
        vmovd   %rdx#d,$H1
        vmovd   $h0#d,$H2
        vmovd   $h1#d,$H3
        vmovd   $h2#d,$H4
        movl    \$1,20($ctx)            # set is_base2_26

        call    __poly1305_init_avx

.Lproceed_avx:
        mov     %r15,$len

        mov     0(%rsp),%r15
.cfi_restore    %r15
        mov     8(%rsp),%r14
.cfi_restore    %r14
        mov     16(%rsp),%r13
.cfi_restore    %r13
        mov     24(%rsp),%r12
.cfi_restore    %r12
        mov     32(%rsp),%rbp
.cfi_restore    %rbp
        mov     40(%rsp),%rbx
.cfi_restore    %rbx
        lea     48(%rsp),%rax
        lea     48(%rsp),%rsp
.cfi_adjust_cfa_offset  -48
.Lbase2_64_avx_epilogue:
        jmp     .Ldo_avx
.cfi_endproc

.align  32
.Leven_avx:
.cfi_startproc
        vmovd           4*0($ctx),$H0           # load hash value
        vmovd           4*1($ctx),$H1
        vmovd           4*2($ctx),$H2
        vmovd           4*3($ctx),$H3
        vmovd           4*4($ctx),$H4

.Ldo_avx:
___
$code.=<<___    if (!$win64);
        lea             -0x58(%rsp),%r11
.cfi_def_cfa            %r11,0x60
        sub             \$0x178,%rsp
___
$code.=<<___    if ($win64);
        lea             -0xf8(%rsp),%r11
        sub             \$0x218,%rsp
        vmovdqa         %xmm6,0x50(%r11)
        vmovdqa         %xmm7,0x60(%r11)
        vmovdqa         %xmm8,0x70(%r11)
        vmovdqa         %xmm9,0x80(%r11)
        vmovdqa         %xmm10,0x90(%r11)
        vmovdqa         %xmm11,0xa0(%r11)
        vmovdqa         %xmm12,0xb0(%r11)
        vmovdqa         %xmm13,0xc0(%r11)
        vmovdqa         %xmm14,0xd0(%r11)
        vmovdqa         %xmm15,0xe0(%r11)
.Ldo_avx_body:
___
$code.=<<___;
        sub             \$64,$len
        lea             -32($inp),%rax
        cmovc           %rax,$inp

        vmovdqu         `16*3`($ctx),$D4        # preload r0^2
        lea             `16*3+64`($ctx),$ctx    # size optimization
        lea             .Lconst(%rip),%rcx

        ################################################################
        # load input
        vmovdqu         16*2($inp),$T0
        vmovdqu         16*3($inp),$T1
        vmovdqa         64(%rcx),$MASK          # .Lmask26

        vpsrldq         \$6,$T0,$T2             # splat input
        vpsrldq         \$6,$T1,$T3
        vpunpckhqdq     $T1,$T0,$T4             # 4
        vpunpcklqdq     $T1,$T0,$T0             # 0:1
        vpunpcklqdq     $T3,$T2,$T3             # 2:3

        vpsrlq          \$40,$T4,$T4            # 4
        vpsrlq          \$26,$T0,$T1
        vpand           $MASK,$T0,$T0           # 0
        vpsrlq          \$4,$T3,$T2
        vpand           $MASK,$T1,$T1           # 1
        vpsrlq          \$30,$T3,$T3
        vpand           $MASK,$T2,$T2           # 2
        vpand           $MASK,$T3,$T3           # 3
        vpor            32(%rcx),$T4,$T4        # padbit, yes, always

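        # The "splat" above distributes each 16-byte block b into five
        # 26-bit lanes: b&M, (b>>26)&M, (b>>52)&M, (b>>78)&M and
        # (b>>104)|2^24, where M = 2^26-1; the 2^24 bit in the top lane
        # is the 2^128 pad bit (bit 128-104 = 24 of digit 4).  The byte
        # shifts (vpsrldq) supply the multiple-of-8 part of each shift
        # count, the remaining bits come from vpsrlq.
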
        jbe             .Lskip_loop_avx

        # expand and copy pre-calculated table to stack
        vmovdqu         `16*1-64`($ctx),$D1
        vmovdqu         `16*2-64`($ctx),$D2
        vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
        vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
        vmovdqa         $D3,-0x90(%r11)
        vmovdqa         $D0,0x00(%rsp)
        vpshufd         \$0xEE,$D1,$D4
        vmovdqu         `16*3-64`($ctx),$D0
        vpshufd         \$0x44,$D1,$D1
        vmovdqa         $D4,-0x80(%r11)
        vmovdqa         $D1,0x10(%rsp)
        vpshufd         \$0xEE,$D2,$D3
        vmovdqu         `16*4-64`($ctx),$D1
        vpshufd         \$0x44,$D2,$D2
        vmovdqa         $D3,-0x70(%r11)
        vmovdqa         $D2,0x20(%rsp)
        vpshufd         \$0xEE,$D0,$D4
        vmovdqu         `16*5-64`($ctx),$D2
        vpshufd         \$0x44,$D0,$D0
        vmovdqa         $D4,-0x60(%r11)
        vmovdqa         $D0,0x30(%rsp)
        vpshufd         \$0xEE,$D1,$D3
        vmovdqu         `16*6-64`($ctx),$D0
        vpshufd         \$0x44,$D1,$D1
        vmovdqa         $D3,-0x50(%r11)
        vmovdqa         $D1,0x40(%rsp)
        vpshufd         \$0xEE,$D2,$D4
        vmovdqu         `16*7-64`($ctx),$D1
        vpshufd         \$0x44,$D2,$D2
        vmovdqa         $D4,-0x40(%r11)
        vmovdqa         $D2,0x50(%rsp)
        vpshufd         \$0xEE,$D0,$D3
        vmovdqu         `16*8-64`($ctx),$D2
        vpshufd         \$0x44,$D0,$D0
        vmovdqa         $D3,-0x30(%r11)
        vmovdqa         $D0,0x60(%rsp)
        vpshufd         \$0xEE,$D1,$D4
        vpshufd         \$0x44,$D1,$D1
        vmovdqa         $D4,-0x20(%r11)
        vmovdqa         $D1,0x70(%rsp)
        vpshufd         \$0xEE,$D2,$D3
         vmovdqa        0x00(%rsp),$D4          # preload r0^2
        vpshufd         \$0x44,$D2,$D2
        vmovdqa         $D3,-0x10(%r11)
        vmovdqa         $D2,0x80(%rsp)

        jmp             .Loop_avx

.align  32
.Loop_avx:
        ################################################################
        # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
        # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
        #   \___________________/
        # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
        # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
        #   \___________________/ \____________________/
        #
        # Note that we start with inp[2:3]*r^2: that product does not
        # depend on the reduction from the previous iteration.
        ################################################################
        # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
        # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
        # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
        #
        # though note that $Tx and $Hx are "reversed" in this section,
        # and $D4 is preloaded with r0^2...
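        #
        # The 5*r factors come from the prime: 2^130 == 5 (mod 2^130-5),
        # so partial products that would land in digits 5..8 are folded
        # back into digits 0..3 multiplied by 5, which is why the table
        # interleaves precomputed 5*r1..5*r4.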

        vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
        vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
          vmovdqa       $H2,0x20(%r11)                          # offload hash
        vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
         vmovdqa        0x10(%rsp),$H2          # r1^2
        vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
        vpmuludq        $T4,$D4,$D4             # d4 = h4*r0

          vmovdqa       $H0,0x00(%r11)                          #
        vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
          vmovdqa       $H1,0x10(%r11)                          #
        vpmuludq        $T3,$H2,$H1             # h3*r1
        vpaddq          $H0,$D0,$D0             # d0 += h4*s1
        vpaddq          $H1,$D4,$D4             # d4 += h3*r1
          vmovdqa       $H3,0x30(%r11)                          #
        vpmuludq        $T2,$H2,$H0             # h2*r1
        vpmuludq        $T1,$H2,$H1             # h1*r1
        vpaddq          $H0,$D3,$D3             # d3 += h2*r1
         vmovdqa        0x30(%rsp),$H3          # r2^2
        vpaddq          $H1,$D2,$D2             # d2 += h1*r1
          vmovdqa       $H4,0x40(%r11)                          #
        vpmuludq        $T0,$H2,$H2             # h0*r1
         vpmuludq       $T2,$H3,$H0             # h2*r2
        vpaddq          $H2,$D1,$D1             # d1 += h0*r1

         vmovdqa        0x40(%rsp),$H4          # s2^2
        vpaddq          $H0,$D4,$D4             # d4 += h2*r2
        vpmuludq        $T1,$H3,$H1             # h1*r2
        vpmuludq        $T0,$H3,$H3             # h0*r2
        vpaddq          $H1,$D3,$D3             # d3 += h1*r2
         vmovdqa        0x50(%rsp),$H2          # r3^2
        vpaddq          $H3,$D2,$D2             # d2 += h0*r2
        vpmuludq        $T4,$H4,$H0             # h4*s2
        vpmuludq        $T3,$H4,$H4             # h3*s2
        vpaddq          $H0,$D1,$D1             # d1 += h4*s2
         vmovdqa        0x60(%rsp),$H3          # s3^2
        vpaddq          $H4,$D0,$D0             # d0 += h3*s2

         vmovdqa        0x80(%rsp),$H4          # s4^2
        vpmuludq        $T1,$H2,$H1             # h1*r3
        vpmuludq        $T0,$H2,$H2             # h0*r3
        vpaddq          $H1,$D4,$D4             # d4 += h1*r3
        vpaddq          $H2,$D3,$D3             # d3 += h0*r3
        vpmuludq        $T4,$H3,$H0             # h4*s3
        vpmuludq        $T3,$H3,$H1             # h3*s3
        vpaddq          $H0,$D2,$D2             # d2 += h4*s3
         vmovdqu        16*0($inp),$H0                          # load input
        vpaddq          $H1,$D1,$D1             # d1 += h3*s3
        vpmuludq        $T2,$H3,$H3             # h2*s3
         vpmuludq       $T2,$H4,$T2             # h2*s4
        vpaddq          $H3,$D0,$D0             # d0 += h2*s3

         vmovdqu        16*1($inp),$H1                          #
        vpaddq          $T2,$D1,$D1             # d1 += h2*s4
        vpmuludq        $T3,$H4,$T3             # h3*s4
        vpmuludq        $T4,$H4,$T4             # h4*s4
         vpsrldq        \$6,$H0,$H2                             # splat input
        vpaddq          $T3,$D2,$D2             # d2 += h3*s4
        vpaddq          $T4,$D3,$D3             # d3 += h4*s4
         vpsrldq        \$6,$H1,$H3                             #
        vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
        vpmuludq        $T1,$H4,$T0             # h1*s4
         vpunpckhqdq    $H1,$H0,$H4             # 4
        vpaddq          $T4,$D4,$D4             # d4 += h0*r4
         vmovdqa        -0x90(%r11),$T4         # r0^4
        vpaddq          $T0,$D0,$D0             # d0 += h1*s4

        vpunpcklqdq     $H1,$H0,$H0             # 0:1
        vpunpcklqdq     $H3,$H2,$H3             # 2:3

        #vpsrlq         \$40,$H4,$H4            # 4
        vpsrldq         \$`40/8`,$H4,$H4        # 4
        vpsrlq          \$26,$H0,$H1
        vpand           $MASK,$H0,$H0           # 0
        vpsrlq          \$4,$H3,$H2
        vpand           $MASK,$H1,$H1           # 1
        vpand           0(%rcx),$H4,$H4         # .Lmask24
        vpsrlq          \$30,$H3,$H3
        vpand           $MASK,$H2,$H2           # 2
        vpand           $MASK,$H3,$H3           # 3
        vpor            32(%rcx),$H4,$H4        # padbit, yes, always

        vpaddq          0x00(%r11),$H0,$H0      # add hash value
        vpaddq          0x10(%r11),$H1,$H1
        vpaddq          0x20(%r11),$H2,$H2
        vpaddq          0x30(%r11),$H3,$H3
        vpaddq          0x40(%r11),$H4,$H4

        lea             16*2($inp),%rax
        lea             16*4($inp),$inp
        sub             \$64,$len
        cmovc           %rax,$inp

        ################################################################
        # Now we accumulate (inp[0:1]+hash)*r^4
        ################################################################
        # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
        # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
        # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

        vpmuludq        $H0,$T4,$T0             # h0*r0
        vpmuludq        $H1,$T4,$T1             # h1*r0
        vpaddq          $T0,$D0,$D0
        vpaddq          $T1,$D1,$D1
         vmovdqa        -0x80(%r11),$T2         # r1^4
        vpmuludq        $H2,$T4,$T0             # h2*r0
        vpmuludq        $H3,$T4,$T1             # h3*r0
        vpaddq          $T0,$D2,$D2
        vpaddq          $T1,$D3,$D3
        vpmuludq        $H4,$T4,$T4             # h4*r0
         vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
        vpaddq          $T4,$D4,$D4

        vpaddq          $T0,$D0,$D0             # d0 += h4*s1
        vpmuludq        $H2,$T2,$T1             # h2*r1
        vpmuludq        $H3,$T2,$T0             # h3*r1
        vpaddq          $T1,$D3,$D3             # d3 += h2*r1
         vmovdqa        -0x60(%r11),$T3         # r2^4
        vpaddq          $T0,$D4,$D4             # d4 += h3*r1
        vpmuludq        $H1,$T2,$T1             # h1*r1
        vpmuludq        $H0,$T2,$T2             # h0*r1
        vpaddq          $T1,$D2,$D2             # d2 += h1*r1
        vpaddq          $T2,$D1,$D1             # d1 += h0*r1

         vmovdqa        -0x50(%r11),$T4         # s2^4
        vpmuludq        $H2,$T3,$T0             # h2*r2
        vpmuludq        $H1,$T3,$T1             # h1*r2
        vpaddq          $T0,$D4,$D4             # d4 += h2*r2
        vpaddq          $T1,$D3,$D3             # d3 += h1*r2
         vmovdqa        -0x40(%r11),$T2         # r3^4
        vpmuludq        $H0,$T3,$T3             # h0*r2
        vpmuludq        $H4,$T4,$T0             # h4*s2
        vpaddq          $T3,$D2,$D2             # d2 += h0*r2
        vpaddq          $T0,$D1,$D1             # d1 += h4*s2
         vmovdqa        -0x30(%r11),$T3         # s3^4
        vpmuludq        $H3,$T4,$T4             # h3*s2
         vpmuludq       $H1,$T2,$T1             # h1*r3
        vpaddq          $T4,$D0,$D0             # d0 += h3*s2

         vmovdqa        -0x10(%r11),$T4         # s4^4
        vpaddq          $T1,$D4,$D4             # d4 += h1*r3
        vpmuludq        $H0,$T2,$T2             # h0*r3
        vpmuludq        $H4,$T3,$T0             # h4*s3
        vpaddq          $T2,$D3,$D3             # d3 += h0*r3
        vpaddq          $T0,$D2,$D2             # d2 += h4*s3
         vmovdqu        16*2($inp),$T0                          # load input
        vpmuludq        $H3,$T3,$T2             # h3*s3
        vpmuludq        $H2,$T3,$T3             # h2*s3
        vpaddq          $T2,$D1,$D1             # d1 += h3*s3
         vmovdqu        16*3($inp),$T1                          #
        vpaddq          $T3,$D0,$D0             # d0 += h2*s3

        vpmuludq        $H2,$T4,$H2             # h2*s4
        vpmuludq        $H3,$T4,$H3             # h3*s4
         vpsrldq        \$6,$T0,$T2                             # splat input
        vpaddq          $H2,$D1,$D1             # d1 += h2*s4
        vpmuludq        $H4,$T4,$H4             # h4*s4
         vpsrldq        \$6,$T1,$T3                             #
        vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
        vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
        vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
        vpmuludq        $H1,$T4,$H0
         vpunpckhqdq    $T1,$T0,$T4             # 4
        vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
        vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4

        vpunpcklqdq     $T1,$T0,$T0             # 0:1
        vpunpcklqdq     $T3,$T2,$T3             # 2:3

        #vpsrlq         \$40,$T4,$T4            # 4
        vpsrldq         \$`40/8`,$T4,$T4        # 4
        vpsrlq          \$26,$T0,$T1
         vmovdqa        0x00(%rsp),$D4          # preload r0^2
        vpand           $MASK,$T0,$T0           # 0
        vpsrlq          \$4,$T3,$T2
        vpand           $MASK,$T1,$T1           # 1
        vpand           0(%rcx),$T4,$T4         # .Lmask24
        vpsrlq          \$30,$T3,$T3
        vpand           $MASK,$T2,$T2           # 2
        vpand           $MASK,$T3,$T3           # 3
        vpor            32(%rcx),$T4,$T4        # padbit, yes, always

        ################################################################
        # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
        # and P. Schwabe
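        #
        # Scalar model of the carry pass below (M = 2^26-1); note the
        # h4 -> h0 carry is multiplied by 5 as c + 4*c, the shift left
        # by two supplying the 4*c term:
        #
        #       c = h3 >> 26; h3 &= M; h4 += c
        #       c = h0 >> 26; h0 &= M; h1 += c
        #       c = h4 >> 26; h4 &= M; h0 += c + 4*c
        #       c = h1 >> 26; h1 &= M; h2 += c
        #       c = h2 >> 26; h2 &= M; h3 += c
        #       c = h0 >> 26; h0 &= M; h1 += c
        #       c = h3 >> 26; h3 &= M; h4 += c
        #
        # Limbs are left only a few bits above 26, which is safe because
        # full reduction is deferred to emit.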

        vpsrlq          \$26,$H3,$D3
        vpand           $MASK,$H3,$H3
        vpaddq          $D3,$H4,$H4             # h3 -> h4

        vpsrlq          \$26,$H0,$D0
        vpand           $MASK,$H0,$H0
        vpaddq          $D0,$D1,$H1             # h0 -> h1

        vpsrlq          \$26,$H4,$D0
        vpand           $MASK,$H4,$H4

        vpsrlq          \$26,$H1,$D1
        vpand           $MASK,$H1,$H1
        vpaddq          $D1,$H2,$H2             # h1 -> h2

        vpaddq          $D0,$H0,$H0
        vpsllq          \$2,$D0,$D0
        vpaddq          $D0,$H0,$H0             # h4 -> h0

        vpsrlq          \$26,$H2,$D2
        vpand           $MASK,$H2,$H2
        vpaddq          $D2,$H3,$H3             # h2 -> h3

        vpsrlq          \$26,$H0,$D0
        vpand           $MASK,$H0,$H0
        vpaddq          $D0,$H1,$H1             # h0 -> h1

        vpsrlq          \$26,$H3,$D3
        vpand           $MASK,$H3,$H3
        vpaddq          $D3,$H4,$H4             # h3 -> h4

        ja              .Loop_avx

.Lskip_loop_avx:
        ################################################################
        # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

        vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
        add             \$32,$len
        jnz             .Long_tail_avx

        vpaddq          $H2,$T2,$T2
        vpaddq          $H0,$T0,$T0
        vpaddq          $H1,$T1,$T1
        vpaddq          $H3,$T3,$T3
        vpaddq          $H4,$T4,$T4

.Long_tail_avx:
        vmovdqa         $H2,0x20(%r11)
        vmovdqa         $H0,0x00(%r11)
        vmovdqa         $H1,0x10(%r11)
        vmovdqa         $H3,0x30(%r11)
        vmovdqa         $H4,0x40(%r11)

        # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
        # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
        # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

        vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
        vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
         vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
        vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
        vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
        vpmuludq        $T4,$D4,$D4             # d4 = h4*r0

        vpmuludq        $T3,$H2,$H0             # h3*r1
        vpaddq          $H0,$D4,$D4             # d4 += h3*r1
         vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
        vpmuludq        $T2,$H2,$H1             # h2*r1
        vpaddq          $H1,$D3,$D3             # d3 += h2*r1
         vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
        vpmuludq        $T1,$H2,$H0             # h1*r1
        vpaddq          $H0,$D2,$D2             # d2 += h1*r1
        vpmuludq        $T0,$H2,$H2             # h0*r1
        vpaddq          $H2,$D1,$D1             # d1 += h0*r1
        vpmuludq        $T4,$H3,$H3             # h4*s1
        vpaddq          $H3,$D0,$D0             # d0 += h4*s1

         vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
        vpmuludq        $T2,$H4,$H1             # h2*r2
        vpaddq          $H1,$D4,$D4             # d4 += h2*r2
        vpmuludq        $T1,$H4,$H0             # h1*r2
        vpaddq          $H0,$D3,$D3             # d3 += h1*r2
         vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
        vpmuludq        $T0,$H4,$H4             # h0*r2
        vpaddq          $H4,$D2,$D2             # d2 += h0*r2
        vpmuludq        $T4,$H2,$H1             # h4*s2
        vpaddq          $H1,$D1,$D1             # d1 += h4*s2
         vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
        vpmuludq        $T3,$H2,$H2             # h3*s2
        vpaddq          $H2,$D0,$D0             # d0 += h3*s2

        vpmuludq        $T1,$H3,$H0             # h1*r3
        vpaddq          $H0,$D4,$D4             # d4 += h1*r3
        vpmuludq        $T0,$H3,$H3             # h0*r3
        vpaddq          $H3,$D3,$D3             # d3 += h0*r3
         vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
        vpmuludq        $T4,$H4,$H1             # h4*s3
        vpaddq          $H1,$D2,$D2             # d2 += h4*s3
         vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
        vpmuludq        $T3,$H4,$H0             # h3*s3
        vpaddq          $H0,$D1,$D1             # d1 += h3*s3
        vpmuludq        $T2,$H4,$H4             # h2*s3
        vpaddq          $H4,$D0,$D0             # d0 += h2*s3

        vpmuludq        $T0,$H2,$H2             # h0*r4
        vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
        vpmuludq        $T4,$H3,$H1             # h4*s4
        vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
        vpmuludq        $T3,$H3,$H0             # h3*s4
        vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
        vpmuludq        $T2,$H3,$H1             # h2*s4
        vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
        vpmuludq        $T1,$H3,$H3             # h1*s4
        vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4

        jz              .Lshort_tail_avx

        vmovdqu         16*0($inp),$H0          # load input
        vmovdqu         16*1($inp),$H1

        vpsrldq         \$6,$H0,$H2             # splat input
        vpsrldq         \$6,$H1,$H3
        vpunpckhqdq     $H1,$H0,$H4             # 4
        vpunpcklqdq     $H1,$H0,$H0             # 0:1
        vpunpcklqdq     $H3,$H2,$H3             # 2:3

        vpsrlq          \$40,$H4,$H4            # 4
        vpsrlq          \$26,$H0,$H1
        vpand           $MASK,$H0,$H0           # 0
        vpsrlq          \$4,$H3,$H2
        vpand           $MASK,$H1,$H1           # 1
        vpsrlq          \$30,$H3,$H3
        vpand           $MASK,$H2,$H2           # 2
        vpand           $MASK,$H3,$H3           # 3
        vpor            32(%rcx),$H4,$H4        # padbit, yes, always

        vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
        vpaddq          0x00(%r11),$H0,$H0
        vpaddq          0x10(%r11),$H1,$H1
        vpaddq          0x20(%r11),$H2,$H2
        vpaddq          0x30(%r11),$H3,$H3
        vpaddq          0x40(%r11),$H4,$H4

        ################################################################
        # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

        vpmuludq        $H0,$T4,$T0             # h0*r0
        vpaddq          $T0,$D0,$D0             # d0 += h0*r0
        vpmuludq        $H1,$T4,$T1             # h1*r0
        vpaddq          $T1,$D1,$D1             # d1 += h1*r0
        vpmuludq        $H2,$T4,$T0             # h2*r0
        vpaddq          $T0,$D2,$D2             # d2 += h2*r0
         vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
        vpmuludq        $H3,$T4,$T1             # h3*r0
        vpaddq          $T1,$D3,$D3             # d3 += h3*r0
        vpmuludq        $H4,$T4,$T4             # h4*r0
        vpaddq          $T4,$D4,$D4             # d4 += h4*r0

        vpmuludq        $H3,$T2,$T0             # h3*r1
        vpaddq          $T0,$D4,$D4             # d4 += h3*r1
         vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
        vpmuludq        $H2,$T2,$T1             # h2*r1
        vpaddq          $T1,$D3,$D3             # d3 += h2*r1
         vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
        vpmuludq        $H1,$T2,$T0             # h1*r1
        vpaddq          $T0,$D2,$D2             # d2 += h1*r1
        vpmuludq        $H0,$T2,$T2             # h0*r1
        vpaddq          $T2,$D1,$D1             # d1 += h0*r1
        vpmuludq        $H4,$T3,$T3             # h4*s1
        vpaddq          $T3,$D0,$D0             # d0 += h4*s1

         vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
        vpmuludq        $H2,$T4,$T1             # h2*r2
        vpaddq          $T1,$D4,$D4             # d4 += h2*r2
        vpmuludq        $H1,$T4,$T0             # h1*r2
        vpaddq          $T0,$D3,$D3             # d3 += h1*r2
         vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
        vpmuludq        $H0,$T4,$T4             # h0*r2
        vpaddq          $T4,$D2,$D2             # d2 += h0*r2
        vpmuludq        $H4,$T2,$T1             # h4*s2
        vpaddq          $T1,$D1,$D1             # d1 += h4*s2
         vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
        vpmuludq        $H3,$T2,$T2             # h3*s2
        vpaddq          $T2,$D0,$D0             # d0 += h3*s2

        vpmuludq        $H1,$T3,$T0             # h1*r3
        vpaddq          $T0,$D4,$D4             # d4 += h1*r3
        vpmuludq        $H0,$T3,$T3             # h0*r3
        vpaddq          $T3,$D3,$D3             # d3 += h0*r3
         vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
        vpmuludq        $H4,$T4,$T1             # h4*s3
        vpaddq          $T1,$D2,$D2             # d2 += h4*s3
         vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
        vpmuludq        $H3,$T4,$T0             # h3*s3
        vpaddq          $T0,$D1,$D1             # d1 += h3*s3
        vpmuludq        $H2,$T4,$T4             # h2*s3
        vpaddq          $T4,$D0,$D0             # d0 += h2*s3

        vpmuludq        $H0,$T2,$T2             # h0*r4
        vpaddq          $T2,$D4,$D4             # d4 += h0*r4
        vpmuludq        $H4,$T3,$T1             # h4*s4
        vpaddq          $T1,$D3,$D3             # d3 += h4*s4
        vpmuludq        $H3,$T3,$T0             # h3*s4
        vpaddq          $T0,$D2,$D2             # d2 += h3*s4
        vpmuludq        $H2,$T3,$T1             # h2*s4
        vpaddq          $T1,$D1,$D1             # d1 += h2*s4
        vpmuludq        $H1,$T3,$T3             # h1*s4
        vpaddq          $T3,$D0,$D0             # d0 += h1*s4

.Lshort_tail_avx:
        ################################################################
        # horizontal addition

        vpsrldq         \$8,$D4,$T4
        vpsrldq         \$8,$D3,$T3
        vpsrldq         \$8,$D1,$T1
        vpsrldq         \$8,$D0,$T0
        vpsrldq         \$8,$D2,$T2
        vpaddq          $T3,$D3,$D3
        vpaddq          $T4,$D4,$D4
        vpaddq          $T0,$D0,$D0
        vpaddq          $T1,$D1,$D1
        vpaddq          $T2,$D2,$D2

        ################################################################
        # lazy reduction

        vpsrlq          \$26,$D3,$H3
        vpand           $MASK,$D3,$D3
        vpaddq          $H3,$D4,$D4             # h3 -> h4

        vpsrlq          \$26,$D0,$H0
        vpand           $MASK,$D0,$D0
        vpaddq          $H0,$D1,$D1             # h0 -> h1

        vpsrlq          \$26,$D4,$H4
        vpand           $MASK,$D4,$D4

        vpsrlq          \$26,$D1,$H1
        vpand           $MASK,$D1,$D1
        vpaddq          $H1,$D2,$D2             # h1 -> h2

        vpaddq          $H4,$D0,$D0
        vpsllq          \$2,$H4,$H4
        vpaddq          $H4,$D0,$D0             # h4 -> h0

        vpsrlq          \$26,$D2,$H2
        vpand           $MASK,$D2,$D2
        vpaddq          $H2,$D3,$D3             # h2 -> h3

        vpsrlq          \$26,$D0,$H0
        vpand           $MASK,$D0,$D0
        vpaddq          $H0,$D1,$D1             # h0 -> h1

        vpsrlq          \$26,$D3,$H3
        vpand           $MASK,$D3,$D3
        vpaddq          $H3,$D4,$D4             # h3 -> h4

        vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
        vmovd           $D1,`4*1-48-64`($ctx)
        vmovd           $D2,`4*2-48-64`($ctx)
        vmovd           $D3,`4*3-48-64`($ctx)
        vmovd           $D4,`4*4-48-64`($ctx)
___
$code.=<<___    if ($win64);
        vmovdqa         0x50(%r11),%xmm6
        vmovdqa         0x60(%r11),%xmm7
        vmovdqa         0x70(%r11),%xmm8
        vmovdqa         0x80(%r11),%xmm9
        vmovdqa         0x90(%r11),%xmm10
        vmovdqa         0xa0(%r11),%xmm11
        vmovdqa         0xb0(%r11),%xmm12
        vmovdqa         0xc0(%r11),%xmm13
        vmovdqa         0xd0(%r11),%xmm14
        vmovdqa         0xe0(%r11),%xmm15
        lea             0xf8(%r11),%rsp
.Ldo_avx_epilogue:
___
$code.=<<___    if (!$win64);
        lea             0x58(%r11),%rsp
.cfi_def_cfa            %rsp,8
___
$code.=<<___;
        vzeroupper
        ret
.cfi_endproc
.size   poly1305_blocks_avx,.-poly1305_blocks_avx

.type   poly1305_emit_avx,\@function,3
.align  32
poly1305_emit_avx:
        cmpl    \$0,20($ctx)    # is_base2_26?
        je      .Lemit

        mov     0($ctx),%eax    # load hash value base 2^26
        mov     4($ctx),%ecx
        mov     8($ctx),%r8d
        mov     12($ctx),%r11d
        mov     16($ctx),%r10d

        shl     \$26,%rcx       # base 2^26 -> base 2^64
        mov     %r8,%r9
        shl     \$52,%r8
        add     %rcx,%rax
        shr     \$12,%r9
        add     %rax,%r8        # h0
        adc     \$0,%r9

        shl     \$14,%r11
        mov     %r10,%rax
        shr     \$24,%r10
        add     %r11,%r9
        shl     \$40,%rax
        add     %rax,%r9        # h1
        adc     \$0,%r10        # h2

        mov     %r10,%rax       # could be partially reduced, so reduce
        mov     %r10,%rcx
        and     \$3,%r10
        shr     \$2,%rax
        and     \$-4,%rcx
        add     %rcx,%rax
        add     %rax,%r8
        adc     \$0,%r9
        adc     \$0,%r10

        mov     %r8,%rax
        add     \$5,%r8         # compare to modulus
        mov     %r9,%rcx
        adc     \$0,%r9
        adc     \$0,%r10
        shr     \$2,%r10        # did 130-bit value overflow?
1407         cmovnz  %r8,%rax
1408         cmovnz  %r9,%rcx
1409
1410         add     0($nonce),%rax  # accumulate nonce
1411         adc     8($nonce),%rcx
1412         mov     %rax,0($mac)    # write result
1413         mov     %rcx,8($mac)
1414
1415         ret
1416 .size   poly1305_emit_avx,.-poly1305_emit_avx
1417 ___
1418
1419 if ($avx>1) {
1420 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1421     map("%ymm$_",(0..15));
1422 my $S4=$MASK;
1423
1424 $code.=<<___;
1425 .type   poly1305_blocks_avx2,\@function,4
1426 .align  32
1427 poly1305_blocks_avx2:
1428 .cfi_startproc
1429         mov     20($ctx),%r8d           # is_base2_26
1430         cmp     \$128,$len
1431         jae     .Lblocks_avx2
1432         test    %r8d,%r8d
1433         jz      .Lblocks
1434
1435 .Lblocks_avx2:
1436         and     \$-16,$len
1437         jz      .Lno_data_avx2
1438
1439         vzeroupper
1440
1441         test    %r8d,%r8d
1442         jz      .Lbase2_64_avx2
1443
1444         test    \$63,$len
1445         jz      .Leven_avx2
1446
1447         push    %rbx
1448 .cfi_push       %rbx
1449         push    %rbp
1450 .cfi_push       %rbp
1451         push    %r12
1452 .cfi_push       %r12
1453         push    %r13
1454 .cfi_push       %r13
1455         push    %r14
1456 .cfi_push       %r14
1457         push    %r15
1458 .cfi_push       %r15
1459 .Lblocks_avx2_body:
1460
1461         mov     $len,%r15               # reassign $len
1462
1463         mov     0($ctx),$d1             # load hash value
1464         mov     8($ctx),$d2
1465         mov     16($ctx),$h2#d
1466
1467         mov     24($ctx),$r0            # load r
1468         mov     32($ctx),$s1
1469
1470         ################################# base 2^26 -> base 2^64
1471         mov     $d1#d,$h0#d
1472         and     \$`-1*(1<<31)`,$d1
1473         mov     $d2,$r1                 # borrow $r1
1474         mov     $d2#d,$h1#d
1475         and     \$`-1*(1<<31)`,$d2
1476
1477         shr     \$6,$d1
1478         shl     \$52,$r1
1479         add     $d1,$h0
1480         shr     \$12,$h1
1481         shr     \$18,$d2
1482         add     $r1,$h0
1483         adc     $d2,$h1
1484
1485         mov     $h2,$d1
1486         shl     \$40,$d1
1487         shr     \$24,$h2
1488         add     $d1,$h1
1489         adc     \$0,$h2                 # can be partially reduced...
1490
1491         mov     \$-4,$d2                # ... so reduce
1492         mov     $h2,$d1
1493         and     $h2,$d2
1494         shr     \$2,$d1
1495         and     \$3,$h2
1496         add     $d2,$d1                 # =*5
1497         add     $d1,$h0
1498         adc     \$0,$h1
1499         adc     \$0,$h2
1500
1501         mov     $s1,$r1
1502         mov     $s1,%rax
1503         shr     \$2,$s1
1504         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
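        # (note: key clamping zeroes r1's low 2 bits, so r1+(r1>>2) is
        # exactly 5*(r1>>2), the multiple needed when folding bits above
        # 2^130 back into the accumulator, since 2^130 == 5 mod 2^130-5)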
1505
1506 .Lbase2_26_pre_avx2:
1507         add     0($inp),$h0             # accumulate input
1508         adc     8($inp),$h1
1509         lea     16($inp),$inp
1510         adc     $padbit,$h2
1511         sub     \$16,%r15
1512
1513         call    __poly1305_block
1514         mov     $r1,%rax
1515
1516         test    \$63,%r15
1517         jnz     .Lbase2_26_pre_avx2
1518
1519         test    $padbit,$padbit         # if $padbit is zero,
1520         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1521
1522         ################################# base 2^64 -> base 2^26
1523         mov     $h0,%rax
1524         mov     $h0,%rdx
1525         shr     \$52,$h0
1526         mov     $h1,$r0
1527         mov     $h1,$r1
1528         shr     \$26,%rdx
1529         and     \$0x3ffffff,%rax        # h[0]
1530         shl     \$12,$r0
1531         and     \$0x3ffffff,%rdx        # h[1]
1532         shr     \$14,$h1
1533         or      $r0,$h0
1534         shl     \$24,$h2
1535         and     \$0x3ffffff,$h0         # h[2]
1536         shr     \$40,$r1
1537         and     \$0x3ffffff,$h1         # h[3]
1538         or      $r1,$h2                 # h[4]
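        # (comment-only sketch, M = 2^26-1: h[0] = h0&M, h[1] = (h0>>26)&M,
        #  h[2] = (h0>>52 | h1<<12)&M, h[3] = (h1>>14)&M,
        #  h[4] = (h1>>40) | (h2<<24))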
1539
1540         test    %r15,%r15
1541         jz      .Lstore_base2_26_avx2
1542
1543         vmovd   %rax#d,%x#$H0
1544         vmovd   %rdx#d,%x#$H1
1545         vmovd   $h0#d,%x#$H2
1546         vmovd   $h1#d,%x#$H3
1547         vmovd   $h2#d,%x#$H4
1548         jmp     .Lproceed_avx2
1549
1550 .align  32
1551 .Lstore_base2_64_avx2:
1552         mov     $h0,0($ctx)
1553         mov     $h1,8($ctx)
1554         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1555         jmp     .Ldone_avx2
1556
1557 .align  16
1558 .Lstore_base2_26_avx2:
1559         mov     %rax#d,0($ctx)          # store hash value base 2^26
1560         mov     %rdx#d,4($ctx)
1561         mov     $h0#d,8($ctx)
1562         mov     $h1#d,12($ctx)
1563         mov     $h2#d,16($ctx)
1564 .align  16
1565 .Ldone_avx2:
1566         mov     0(%rsp),%r15
1567 .cfi_restore    %r15
1568         mov     8(%rsp),%r14
1569 .cfi_restore    %r14
1570         mov     16(%rsp),%r13
1571 .cfi_restore    %r13
1572         mov     24(%rsp),%r12
1573 .cfi_restore    %r12
1574         mov     32(%rsp),%rbp
1575 .cfi_restore    %rbp
1576         mov     40(%rsp),%rbx
1577 .cfi_restore    %rbx
1578         lea     48(%rsp),%rsp
1579 .cfi_adjust_cfa_offset  -48
1580 .Lno_data_avx2:
1581 .Lblocks_avx2_epilogue:
1582         ret
1583 .cfi_endproc
1584
1585 .align  32
1586 .Lbase2_64_avx2:
1587 .cfi_startproc
1588         push    %rbx
1589 .cfi_push       %rbx
1590         push    %rbp
1591 .cfi_push       %rbp
1592         push    %r12
1593 .cfi_push       %r12
1594         push    %r13
1595 .cfi_push       %r13
1596         push    %r14
1597 .cfi_push       %r14
1598         push    %r15
1599 .cfi_push       %r15
1600 .Lbase2_64_avx2_body:
1601
1602         mov     $len,%r15               # reassign $len
1603
1604         mov     24($ctx),$r0            # load r
1605         mov     32($ctx),$s1
1606
1607         mov     0($ctx),$h0             # load hash value
1608         mov     8($ctx),$h1
1609         mov     16($ctx),$h2#d
1610
1611         mov     $s1,$r1
1612         mov     $s1,%rax
1613         shr     \$2,$s1
1614         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1615
1616         test    \$63,$len
1617         jz      .Linit_avx2
1618
1619 .Lbase2_64_pre_avx2:
1620         add     0($inp),$h0             # accumulate input
1621         adc     8($inp),$h1
1622         lea     16($inp),$inp
1623         adc     $padbit,$h2
1624         sub     \$16,%r15
1625
1626         call    __poly1305_block
1627         mov     $r1,%rax
1628
1629         test    \$63,%r15
1630         jnz     .Lbase2_64_pre_avx2
1631
1632 .Linit_avx2:
1633         ################################# base 2^64 -> base 2^26
1634         mov     $h0,%rax
1635         mov     $h0,%rdx
1636         shr     \$52,$h0
1637         mov     $h1,$d1
1638         mov     $h1,$d2
1639         shr     \$26,%rdx
1640         and     \$0x3ffffff,%rax        # h[0]
1641         shl     \$12,$d1
1642         and     \$0x3ffffff,%rdx        # h[1]
1643         shr     \$14,$h1
1644         or      $d1,$h0
1645         shl     \$24,$h2
1646         and     \$0x3ffffff,$h0         # h[2]
1647         shr     \$40,$d2
1648         and     \$0x3ffffff,$h1         # h[3]
1649         or      $d2,$h2                 # h[4]
1650
1651         vmovd   %rax#d,%x#$H0
1652         vmovd   %rdx#d,%x#$H1
1653         vmovd   $h0#d,%x#$H2
1654         vmovd   $h1#d,%x#$H3
1655         vmovd   $h2#d,%x#$H4
1656         movl    \$1,20($ctx)            # set is_base2_26
1657
1658         call    __poly1305_init_avx
1659
1660 .Lproceed_avx2:
1661         mov     %r15,$len                       # restore $len
1662         mov     OPENSSL_ia32cap_P+8(%rip),%r10d # leaf-7 EBX feature bits
1663         mov     \$`(1<<31|1<<30|1<<16)`,%r11d   # AVX512VL|AVX512BW|AVX512F
1664
1665         mov     0(%rsp),%r15
1666 .cfi_restore    %r15
1667         mov     8(%rsp),%r14
1668 .cfi_restore    %r14
1669         mov     16(%rsp),%r13
1670 .cfi_restore    %r13
1671         mov     24(%rsp),%r12
1672 .cfi_restore    %r12
1673         mov     32(%rsp),%rbp
1674 .cfi_restore    %rbp
1675         mov     40(%rsp),%rbx
1676 .cfi_restore    %rbx
1677         lea     48(%rsp),%rax
1678         lea     48(%rsp),%rsp
1679 .cfi_adjust_cfa_offset  -48
1680 .Lbase2_64_avx2_epilogue:
1681         jmp     .Ldo_avx2
1682 .cfi_endproc
1683
1684 .align  32
1685 .Leven_avx2:
1686 .cfi_startproc
1687         mov             OPENSSL_ia32cap_P+8(%rip),%r10d # leaf-7 EBX feature bits
1688         mov             \$`(1<<31|1<<30|1<<16)`,%r11d   # AVX512VL|AVX512BW|AVX512F
1689         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1690         vmovd           4*1($ctx),%x#$H1
1691         vmovd           4*2($ctx),%x#$H2
1692         vmovd           4*3($ctx),%x#$H3
1693         vmovd           4*4($ctx),%x#$H4
1694
1695 .Ldo_avx2:
1696 ___
1697 $code.=<<___            if ($avx>2);
1698         cmp             \$512,$len
1699         jb              .Lskip_avx512
1700         and             %r11d,%r10d
1701         cmp             %r11d,%r10d             # check for AVX512F+BW+VL
1702         je              .Lblocks_avx512
1703 .Lskip_avx512:
1704 ___
1705 $code.=<<___    if (!$win64);
1706         lea             -8(%rsp),%r11
1707 .cfi_def_cfa            %r11,16
1708         sub             \$0x128,%rsp
1709 ___
1710 $code.=<<___    if ($win64);
1711         lea             -0xf8(%rsp),%r11
1712         sub             \$0x1c8,%rsp
1713         vmovdqa         %xmm6,0x50(%r11)
1714         vmovdqa         %xmm7,0x60(%r11)
1715         vmovdqa         %xmm8,0x70(%r11)
1716         vmovdqa         %xmm9,0x80(%r11)
1717         vmovdqa         %xmm10,0x90(%r11)
1718         vmovdqa         %xmm11,0xa0(%r11)
1719         vmovdqa         %xmm12,0xb0(%r11)
1720         vmovdqa         %xmm13,0xc0(%r11)
1721         vmovdqa         %xmm14,0xd0(%r11)
1722         vmovdqa         %xmm15,0xe0(%r11)
1723 .Ldo_avx2_body:
1724 ___
1725 $code.=<<___;
1726         lea             .Lconst(%rip),%rcx
1727         lea             48+64($ctx),$ctx        # size optimization
1728         vmovdqa         96(%rcx),$T0            # .Lpermd_avx2
1729
1730         # expand and copy pre-calculated table to stack
1731         vmovdqu         `16*0-64`($ctx),%x#$T2
1732         and             \$-512,%rsp
1733         vmovdqu         `16*1-64`($ctx),%x#$T3
1734         vmovdqu         `16*2-64`($ctx),%x#$T4
1735         vmovdqu         `16*3-64`($ctx),%x#$D0
1736         vmovdqu         `16*4-64`($ctx),%x#$D1
1737         vmovdqu         `16*5-64`($ctx),%x#$D2
1738         lea             0x90(%rsp),%rax         # size optimization
1739         vmovdqu         `16*6-64`($ctx),%x#$D3
1740         vpermd          $T2,$T0,$T2             # 00003412 -> 14243444
1741         vmovdqu         `16*7-64`($ctx),%x#$D4
1742         vpermd          $T3,$T0,$T3
1743         vmovdqu         `16*8-64`($ctx),%x#$MASK
1744         vpermd          $T4,$T0,$T4
1745         vmovdqa         $T2,0x00(%rsp)
1746         vpermd          $D0,$T0,$D0
1747         vmovdqa         $T3,0x20-0x90(%rax)
1748         vpermd          $D1,$T0,$D1
1749         vmovdqa         $T4,0x40-0x90(%rax)
1750         vpermd          $D2,$T0,$D2
1751         vmovdqa         $D0,0x60-0x90(%rax)
1752         vpermd          $D3,$T0,$D3
1753         vmovdqa         $D1,0x80-0x90(%rax)
1754         vpermd          $D4,$T0,$D4
1755         vmovdqa         $D2,0xa0-0x90(%rax)
1756         vpermd          $MASK,$T0,$MASK
1757         vmovdqa         $D3,0xc0-0x90(%rax)
1758         vmovdqa         $D4,0xe0-0x90(%rax)
1759         vmovdqa         $MASK,0x100-0x90(%rax)
1760         vmovdqa         64(%rcx),$MASK          # .Lmask26
1761
1762         ################################################################
1763         # load input
1764         vmovdqu         16*0($inp),%x#$T0
1765         vmovdqu         16*1($inp),%x#$T1
1766         vinserti128     \$1,16*2($inp),$T0,$T0
1767         vinserti128     \$1,16*3($inp),$T1,$T1
1768         lea             16*4($inp),$inp
1769
1770         vpsrldq         \$6,$T0,$T2             # splat input
1771         vpsrldq         \$6,$T1,$T3
1772         vpunpckhqdq     $T1,$T0,$T4             # 4
1773         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1774         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1775
1776         vpsrlq          \$30,$T2,$T3
1777         vpsrlq          \$4,$T2,$T2
1778         vpsrlq          \$26,$T0,$T1
1779         vpsrlq          \$40,$T4,$T4            # 4
1780         vpand           $MASK,$T2,$T2           # 2
1781         vpand           $MASK,$T0,$T0           # 0
1782         vpand           $MASK,$T1,$T1           # 1
1783         vpand           $MASK,$T3,$T3           # 3
1784         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1785
1786         vpaddq          $H2,$T2,$H2             # accumulate input
1787         sub             \$64,$len
1788         jz              .Ltail_avx2
1789         jmp             .Loop_avx2
1790
1791 .align  32
1792 .Loop_avx2:
1793         ################################################################
1794         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1795         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1796         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1797         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1798         #   \________/\__________/
1799         ################################################################
1800         #vpaddq         $H2,$T2,$H2             # accumulate input
1801         vpaddq          $H0,$T0,$H0
1802         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1803         vpaddq          $H1,$T1,$H1
1804         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1805         vpaddq          $H3,$T3,$H3
1806         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1807         vpaddq          $H4,$T4,$H4
1808         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1809         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1810
1811         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1812         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1813         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1814         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1815         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1816         #
1817         # however, as h2 is "chronologically" the first one available, the
1818         # corresponding operations are pulled up, so it's
1819         #
1820         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1821         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1822         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1823         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1824         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
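        #
        # (sN denotes 5*rN, pre-computed at key setup, so the mod 2^130-5
        # folding costs nothing extra in the multiply-accumulate chain)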
1825
1826         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1827         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1828         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1829         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1830         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1831
1832         vpmuludq        $H0,$T1,$T4             # h0*r1
1833         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1834         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1835         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1836         vpmuludq        $H3,$T1,$T4             # h3*r1
1837         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1838         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1839         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1840          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1841
1842         vpmuludq        $H0,$T0,$T4             # h0*r0
1843         vpmuludq        $H1,$T0,$H2             # h1*r0
1844         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1845         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1846         vpmuludq        $H3,$T0,$T4             # h3*r0
1847         vpmuludq        $H4,$T0,$H2             # h4*r0
1848          vmovdqu        16*0($inp),%x#$T0       # load input
1849         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1850         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1851          vinserti128    \$1,16*2($inp),$T0,$T0
1852
1853         vpmuludq        $H3,$T1,$T4             # h3*s2
1854         vpmuludq        $H4,$T1,$H2             # h4*s2
1855          vmovdqu        16*1($inp),%x#$T1
1856         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1857         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1858          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1859         vpmuludq        $H1,$T2,$T4             # h1*r2
1860         vpmuludq        $H0,$T2,$T2             # h0*r2
1861         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1862         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1863          vinserti128    \$1,16*3($inp),$T1,$T1
1864          lea            16*4($inp),$inp
1865
1866         vpmuludq        $H1,$H2,$T4             # h1*r3
1867         vpmuludq        $H0,$H2,$H2             # h0*r3
1868          vpsrldq        \$6,$T0,$T2             # splat input
1869         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1870         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1871         vpmuludq        $H3,$T3,$T4             # h3*s3
1872         vpmuludq        $H4,$T3,$H2             # h4*s3
1873          vpsrldq        \$6,$T1,$T3
1874         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1875         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1876          vpunpckhqdq    $T1,$T0,$T4             # 4
1877
1878         vpmuludq        $H3,$S4,$H3             # h3*s4
1879         vpmuludq        $H4,$S4,$H4             # h4*s4
1880          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1881         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1882         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1883          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1884         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1885         vpmuludq        $H1,$S4,$H0             # h1*s4
1886         vmovdqa         64(%rcx),$MASK          # .Lmask26
1887         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1888         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1889
1890         ################################################################
1891         # lazy reduction (interleaved with tail of input splat)
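        # (each limb's excess above 26 bits is carried into the next one;
        # the carry out of h4 re-enters h0 multiplied by 5, done below as
        # an add plus an add of the carry shifted left by 2, because
        # 2^130 == 5 mod 2^130-5)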
1892
1893         vpsrlq          \$26,$H3,$D3
1894         vpand           $MASK,$H3,$H3
1895         vpaddq          $D3,$H4,$H4             # h3 -> h4
1896
1897         vpsrlq          \$26,$H0,$D0
1898         vpand           $MASK,$H0,$H0
1899         vpaddq          $D0,$D1,$H1             # h0 -> h1
1900
1901         vpsrlq          \$26,$H4,$D4
1902         vpand           $MASK,$H4,$H4
1903
1904          vpsrlq         \$4,$T3,$T2
1905
1906         vpsrlq          \$26,$H1,$D1
1907         vpand           $MASK,$H1,$H1
1908         vpaddq          $D1,$H2,$H2             # h1 -> h2
1909
1910         vpaddq          $D4,$H0,$H0
1911         vpsllq          \$2,$D4,$D4
1912         vpaddq          $D4,$H0,$H0             # h4 -> h0
1913
1914          vpand          $MASK,$T2,$T2           # 2
1915          vpsrlq         \$26,$T0,$T1
1916
1917         vpsrlq          \$26,$H2,$D2
1918         vpand           $MASK,$H2,$H2
1919         vpaddq          $D2,$H3,$H3             # h2 -> h3
1920
1921          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1922          vpsrlq         \$30,$T3,$T3
1923
1924         vpsrlq          \$26,$H0,$D0
1925         vpand           $MASK,$H0,$H0
1926         vpaddq          $D0,$H1,$H1             # h0 -> h1
1927
1928          vpsrlq         \$40,$T4,$T4            # 4
1929
1930         vpsrlq          \$26,$H3,$D3
1931         vpand           $MASK,$H3,$H3
1932         vpaddq          $D3,$H4,$H4             # h3 -> h4
1933
1934          vpand          $MASK,$T0,$T0           # 0
1935          vpand          $MASK,$T1,$T1           # 1
1936          vpand          $MASK,$T3,$T3           # 3
1937          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1938
1939         sub             \$64,$len
1940         jnz             .Loop_avx2
1941
1942         .byte           0x66,0x90               # 2-byte nop, padding before .Ltail_avx2
1943 .Ltail_avx2:
1944         ################################################################
1945         # while the above multiplications were by r^4 in all lanes, in the
1946         # last iteration we multiply the least significant lane by r^4 and
1947         # the most significant one by r, so this is a copy of the above,
1948         # except that references to the precomputed table are displaced by 4...
1949
1950         #vpaddq         $H2,$T2,$H2             # accumulate input
1951         vpaddq          $H0,$T0,$H0
1952         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1953         vpaddq          $H1,$T1,$H1
1954         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1955         vpaddq          $H3,$T3,$H3
1956         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1957         vpaddq          $H4,$T4,$H4
1958         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1959         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1960
1961         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1962         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1963         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1964         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1965         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1966
1967         vpmuludq        $H0,$T1,$T4             # h0*r1
1968         vpmuludq        $H1,$T1,$H2             # h1*r1
1969         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1970         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1971         vpmuludq        $H3,$T1,$T4             # h3*r1
1972         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1973         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1974         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1975
1976         vpmuludq        $H0,$T0,$T4             # h0*r0
1977         vpmuludq        $H1,$T0,$H2             # h1*r0
1978         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1979          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1980         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1981         vpmuludq        $H3,$T0,$T4             # h3*r0
1982         vpmuludq        $H4,$T0,$H2             # h4*r0
1983         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1984         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1985
1986         vpmuludq        $H3,$T1,$T4             # h3*s2
1987         vpmuludq        $H4,$T1,$H2             # h4*s2
1988         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1989         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1990          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1991         vpmuludq        $H1,$T2,$T4             # h1*r2
1992         vpmuludq        $H0,$T2,$T2             # h0*r2
1993         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1994         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1995
1996         vpmuludq        $H1,$H2,$T4             # h1*r3
1997         vpmuludq        $H0,$H2,$H2             # h0*r3
1998         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1999         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
2000         vpmuludq        $H3,$T3,$T4             # h3*s3
2001         vpmuludq        $H4,$T3,$H2             # h4*s3
2002         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
2003         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
2004
2005         vpmuludq        $H3,$S4,$H3             # h3*s4
2006         vpmuludq        $H4,$S4,$H4             # h4*s4
2007         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
2008         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
2009         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
2010         vpmuludq        $H1,$S4,$H0             # h1*s4
2011         vmovdqa         64(%rcx),$MASK          # .Lmask26
2012         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
2013         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
2014
2015         ################################################################
2016         # horizontal addition
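        #
        # (each %ymm register holds four 64-bit partial sums: first add the
        # odd qwords into the even ones within each 128-bit lane, then fold
        # the upper 128-bit lane into the lower, leaving the total in lane 0)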
2017
2018         vpsrldq         \$8,$D1,$T1
2019         vpsrldq         \$8,$H2,$T2
2020         vpsrldq         \$8,$H3,$T3
2021         vpsrldq         \$8,$H4,$T4
2022         vpsrldq         \$8,$H0,$T0
2023         vpaddq          $T1,$D1,$D1
2024         vpaddq          $T2,$H2,$H2
2025         vpaddq          $T3,$H3,$H3
2026         vpaddq          $T4,$H4,$H4
2027         vpaddq          $T0,$H0,$H0
2028
2029         vpermq          \$0x2,$H3,$T3
2030         vpermq          \$0x2,$H4,$T4
2031         vpermq          \$0x2,$H0,$T0
2032         vpermq          \$0x2,$D1,$T1
2033         vpermq          \$0x2,$H2,$T2
2034         vpaddq          $T3,$H3,$H3
2035         vpaddq          $T4,$H4,$H4
2036         vpaddq          $T0,$H0,$H0
2037         vpaddq          $T1,$D1,$D1
2038         vpaddq          $T2,$H2,$H2
2039
2040         ################################################################
2041         # lazy reduction
2042
2043         vpsrlq          \$26,$H3,$D3
2044         vpand           $MASK,$H3,$H3
2045         vpaddq          $D3,$H4,$H4             # h3 -> h4
2046
2047         vpsrlq          \$26,$H0,$D0
2048         vpand           $MASK,$H0,$H0
2049         vpaddq          $D0,$D1,$H1             # h0 -> h1
2050
2051         vpsrlq          \$26,$H4,$D4
2052         vpand           $MASK,$H4,$H4
2053
2054         vpsrlq          \$26,$H1,$D1
2055         vpand           $MASK,$H1,$H1
2056         vpaddq          $D1,$H2,$H2             # h1 -> h2
2057
2058         vpaddq          $D4,$H0,$H0
2059         vpsllq          \$2,$D4,$D4
2060         vpaddq          $D4,$H0,$H0             # h4 -> h0
2061
2062         vpsrlq          \$26,$H2,$D2
2063         vpand           $MASK,$H2,$H2
2064         vpaddq          $D2,$H3,$H3             # h2 -> h3
2065
2066         vpsrlq          \$26,$H0,$D0
2067         vpand           $MASK,$H0,$H0
2068         vpaddq          $D0,$H1,$H1             # h0 -> h1
2069
2070         vpsrlq          \$26,$H3,$D3
2071         vpand           $MASK,$H3,$H3
2072         vpaddq          $D3,$H4,$H4             # h3 -> h4
2073
2074         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2075         vmovd           %x#$H1,`4*1-48-64`($ctx)
2076         vmovd           %x#$H2,`4*2-48-64`($ctx)
2077         vmovd           %x#$H3,`4*3-48-64`($ctx)
2078         vmovd           %x#$H4,`4*4-48-64`($ctx)
2079 ___
2080 $code.=<<___    if ($win64);
2081         vmovdqa         0x50(%r11),%xmm6
2082         vmovdqa         0x60(%r11),%xmm7
2083         vmovdqa         0x70(%r11),%xmm8
2084         vmovdqa         0x80(%r11),%xmm9
2085         vmovdqa         0x90(%r11),%xmm10
2086         vmovdqa         0xa0(%r11),%xmm11
2087         vmovdqa         0xb0(%r11),%xmm12
2088         vmovdqa         0xc0(%r11),%xmm13
2089         vmovdqa         0xd0(%r11),%xmm14
2090         vmovdqa         0xe0(%r11),%xmm15
2091         lea             0xf8(%r11),%rsp
2092 .Ldo_avx2_epilogue:
2093 ___
2094 $code.=<<___    if (!$win64);
2095         lea             8(%r11),%rsp
2096 .cfi_def_cfa            %rsp,8
2097 ___
2098 $code.=<<___;
2099         vzeroupper
2100         ret
2101 .cfi_endproc
2102 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
2103 ___
2104 #######################################################################
2105 if ($avx>2) {
2106 # On entry we have input length divisible by 64. But since the inner loop
2107 # processes 128 bytes per iteration, cases when the length is not divisible
2108 # by 128 are handled by passing the tail 64 bytes to .Ltail_avx2. For this
2109 # reason the stack layout is kept identical to poly1305_blocks_avx2. If not
2110 # for this tail, we wouldn't even have to allocate a stack frame...
2111
2112 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
2113 my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
2114 my $PADBIT="%zmm30";
2115 my $GATHER="%ymm31";
2116
2117 $code.=<<___;
2118 .type   poly1305_blocks_avx512,\@function,4
2119 .align  32
2120 poly1305_blocks_avx512:
2121 .cfi_startproc
2122 .Lblocks_avx512:
2123         vzeroupper
2124 ___
2125 $code.=<<___    if (!$win64);
2126         lea             -8(%rsp),%r11
2127 .cfi_def_cfa            %r11,16
2128         sub             \$0x128,%rsp
2129 ___
2130 $code.=<<___    if ($win64);
2131         lea             -0xf8(%rsp),%r11
2132         sub             \$0x1c8,%rsp
2133         vmovdqa         %xmm6,0x50(%r11)
2134         vmovdqa         %xmm7,0x60(%r11)
2135         vmovdqa         %xmm8,0x70(%r11)
2136         vmovdqa32       %xmm9,0x80(%r11)
2137         vmovdqa32       %xmm10,0x90(%r11)
2138         vmovdqa32       %xmm11,0xa0(%r11)
2139         vmovdqa32       %xmm12,0xb0(%r11)
2140         vmovdqa32       %xmm13,0xc0(%r11)
2141         vmovdqa32       %xmm14,0xd0(%r11)
2142         vmovdqa32       %xmm15,0xe0(%r11)
2143 .Ldo_avx512_body:
2144 ___
2145 $code.=<<___;
2146         lea             .Lconst(%rip),%rcx
2147         lea             48+64($ctx),$ctx        # size optimization
2148         vmovdqa         96(%rcx),$T2            # .Lpermd_avx2
2149
2150         # expand pre-calculated table
2151         vmovdqu32       `16*0-64`($ctx),%x#$R0
2152         and             \$-512,%rsp
2153         vmovdqu32       `16*1-64`($ctx),%x#$R1
2154         vmovdqu32       `16*2-64`($ctx),%x#$S1
2155         vmovdqu32       `16*3-64`($ctx),%x#$R2
2156         vmovdqu32       `16*4-64`($ctx),%x#$S2
2157         vmovdqu32       `16*5-64`($ctx),%x#$R3
2158         vmovdqu32       `16*6-64`($ctx),%x#$S3
2159         vmovdqu32       `16*7-64`($ctx),%x#$R4
2160         vmovdqu32       `16*8-64`($ctx),%x#$S4
2161         vpermd          $R0,$T2,$R0             # 00003412 -> 14243444
2162         vmovdqa64       64(%rcx),$MASK          # .Lmask26
2163         vpermd          $R1,$T2,$R1
2164         vpermd          $S1,$T2,$S1
2165         vpermd          $R2,$T2,$R2
2166         vmovdqa32       $R0,0x00(%rsp)          # save in case $len%128 != 0
2167          vpsrlq         \$32,$R0,$T0            # 14243444 -> 01020304
2168         vpermd          $S2,$T2,$S2
2169         vmovdqa32       $R1,0x20(%rsp)
2170          vpsrlq         \$32,$R1,$T1
2171         vpermd          $R3,$T2,$R3
2172         vmovdqa32       $S1,0x40(%rsp)
2173         vpermd          $S3,$T2,$S3
2174         vpermd          $R4,$T2,$R4
2175         vmovdqa32       $R2,0x60(%rsp)
2176         vpermd          $S4,$T2,$S4
2177         vmovdqa32       $S2,0x80(%rsp)
2178         vmovdqa32       $R3,0xa0(%rsp)
2179         vmovdqa32       $S3,0xc0(%rsp)
2180         vmovdqa32       $R4,0xe0(%rsp)
2181         vmovdqa32       $S4,0x100(%rsp)
2182
2183         ################################################################
2184         # calculate 5th through 8th powers of the key
2185         #
2186         # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2187         # d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2188         # d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
2189         # d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
2190         # d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0
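        #
        # (rN' = limb N of r^1..r^4, extracted as 01020304 by the vpsrlq's,
        # rN = limb N of r^4 sitting in the even dwords, so the lane-wise
        # products evaluate to r^5, r^6, r^7 and r^8)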
2191
2192         vpmuludq        $T0,$R0,$D0             # d0 = r0'*r0
2193         vpmuludq        $T0,$R1,$D1             # d1 = r0'*r1
2194         vpmuludq        $T0,$R2,$D2             # d2 = r0'*r2
2195         vpmuludq        $T0,$R3,$D3             # d3 = r0'*r3
2196         vpmuludq        $T0,$R4,$D4             # d4 = r0'*r4
2197          vpsrlq         \$32,$R2,$T2
2198
2199         vpmuludq        $T1,$S4,$M0
2200         vpmuludq        $T1,$R0,$M1
2201         vpmuludq        $T1,$R1,$M2
2202         vpmuludq        $T1,$R2,$M3
2203         vpmuludq        $T1,$R3,$M4
2204          vpsrlq         \$32,$R3,$T3
2205         vpaddq          $M0,$D0,$D0             # d0 += r1'*5*r4
2206         vpaddq          $M1,$D1,$D1             # d1 += r1'*r0
2207         vpaddq          $M2,$D2,$D2             # d2 += r1'*r1
2208         vpaddq          $M3,$D3,$D3             # d3 += r1'*r2
2209         vpaddq          $M4,$D4,$D4             # d4 += r1'*r3
2210
2211         vpmuludq        $T2,$S3,$M0
2212         vpmuludq        $T2,$S4,$M1
2213         vpmuludq        $T2,$R1,$M3
2214         vpmuludq        $T2,$R2,$M4
2215         vpmuludq        $T2,$R0,$M2
2216          vpsrlq         \$32,$R4,$T4
2217         vpaddq          $M0,$D0,$D0             # d0 += r2'*5*r3
2218         vpaddq          $M1,$D1,$D1             # d1 += r2'*5*r4
2219         vpaddq          $M3,$D3,$D3             # d3 += r2'*r1
2220         vpaddq          $M4,$D4,$D4             # d4 += r2'*r2
2221         vpaddq          $M2,$D2,$D2             # d2 += r2'*r0
2222
2223         vpmuludq        $T3,$S2,$M0
2224         vpmuludq        $T3,$R0,$M3
2225         vpmuludq        $T3,$R1,$M4
2226         vpmuludq        $T3,$S3,$M1
2227         vpmuludq        $T3,$S4,$M2
2228         vpaddq          $M0,$D0,$D0             # d0 += r3'*5*r2
2229         vpaddq          $M3,$D3,$D3             # d3 += r3'*r0
2230         vpaddq          $M4,$D4,$D4             # d4 += r3'*r1
2231         vpaddq          $M1,$D1,$D1             # d1 += r3'*5*r3
2232         vpaddq          $M2,$D2,$D2             # d2 += r3'*5*r4
2233
2234         vpmuludq        $T4,$S4,$M3
2235         vpmuludq        $T4,$R0,$M4
2236         vpmuludq        $T4,$S1,$M0
2237         vpmuludq        $T4,$S2,$M1
2238         vpmuludq        $T4,$S3,$M2
2239         vpaddq          $M3,$D3,$D3             # d3 += r4'*5*r4
2240         vpaddq          $M4,$D4,$D4             # d4 += r4'*r0
2241         vpaddq          $M0,$D0,$D0             # d0 += r4'*5*r1
2242         vpaddq          $M1,$D1,$D1             # d1 += r4'*5*r2
2243         vpaddq          $M2,$D2,$D2             # d2 += r4'*5*r3
2244
2245         ################################################################
2246         # load input
2247         vmovdqu64       16*0($inp),%z#$T3
2248         vmovdqu64       16*4($inp),%z#$T4
2249         lea             16*8($inp),$inp
2250
2251         ################################################################
2252         # lazy reduction
2253
2254         vpsrlq          \$26,$D3,$M3
2255         vpandq          $MASK,$D3,$D3
2256         vpaddq          $M3,$D4,$D4             # d3 -> d4
2257
2258         vpsrlq          \$26,$D0,$M0
2259         vpandq          $MASK,$D0,$D0
2260         vpaddq          $M0,$D1,$D1             # d0 -> d1
2261
2262         vpsrlq          \$26,$D4,$M4
2263         vpandq          $MASK,$D4,$D4
2264
2265         vpsrlq          \$26,$D1,$M1
2266         vpandq          $MASK,$D1,$D1
2267         vpaddq          $M1,$D2,$D2             # d1 -> d2
2268
2269         vpaddq          $M4,$D0,$D0
2270         vpsllq          \$2,$M4,$M4
2271         vpaddq          $M4,$D0,$D0             # d4 -> d0
2272
2273         vpsrlq          \$26,$D2,$M2
2274         vpandq          $MASK,$D2,$D2
2275         vpaddq          $M2,$D3,$D3             # d2 -> d3
2276
2277         vpsrlq          \$26,$D0,$M0
2278         vpandq          $MASK,$D0,$D0
2279         vpaddq          $M0,$D1,$D1             # d0 -> d1
2280
2281         vpsrlq          \$26,$D3,$M3
2282         vpandq          $MASK,$D3,$D3
2283         vpaddq          $M3,$D4,$D4             # d3 -> d4
2284
2285 ___
2286 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));            # switch to %zmm domain
2287 map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
2288 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2289 map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
2290 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2291 map(s/%y/%z/,($MASK));
2292 $code.=<<___;
2293         ################################################################
2294         # at this point we have 14243444 in $R0-$S4 and 05060708 in
2295         # $D0-$D4, ...
2296
2297         vpunpcklqdq     $T4,$T3,$T0     # transpose input
2298         vpunpckhqdq     $T4,$T3,$T4
2299
2300         # ... since input 64-bit lanes are ordered as 73625140, we could
2301         # "vperm" it to 76543210 (here and in each loop iteration), *or*
2302         # we could just flow along, hence the goal for $R0-$S4 is
2303         # 1858286838784888 ...
2304
2305         vmovdqa32       128(%rcx),$M0           # .Lpermd_avx512
2306         mov             \$0x7777,%eax           # select three of every
2307         kmovw           %eax,%k1                # four dword positions
2308
2309         vpermd          $R0,$M0,$R0             # 14243444 -> 1---2---3---4---
2310         vpermd          $R1,$M0,$R1
2311         vpermd          $R2,$M0,$R2
2312         vpermd          $R3,$M0,$R3
2313         vpermd          $R4,$M0,$R4
2314
2315         vpermd          $D0,$M0,${R0}{%k1}      # 05060708 -> 1858286838784888
2316         vpermd          $D1,$M0,${R1}{%k1}
2317         vpermd          $D2,$M0,${R2}{%k1}
2318         vpermd          $D3,$M0,${R3}{%k1}
2319         vpermd          $D4,$M0,${R4}{%k1}
2320
2321         vpslld          \$2,$R1,$S1             # *5
2322         vpslld          \$2,$R2,$S2
2323         vpslld          \$2,$R3,$S3
2324         vpslld          \$2,$R4,$S4
2325         vpaddd          $R1,$S1,$S1
2326         vpaddd          $R2,$S2,$S2
2327         vpaddd          $R3,$S3,$S3
2328         vpaddd          $R4,$S4,$S4
2329
2330         vpbroadcastq    %x#$MASK,$MASK
2331         vpbroadcastq    32(%rcx),$PADBIT        # .L129
2332
2333         vpsrlq          \$52,$T0,$T2            # splat input
2334         vpsllq          \$12,$T4,$T3
2335         vporq           $T3,$T2,$T2
2336         vpsrlq          \$26,$T0,$T1
2337         vpsrlq          \$14,$T4,$T3
2338         vpsrlq          \$40,$T4,$T4            # 4
2339         vpandq          $MASK,$T2,$T2           # 2
2340         vpandq          $MASK,$T0,$T0           # 0
2341         #vpandq         $MASK,$T1,$T1           # 1
2342         #vpandq         $MASK,$T3,$T3           # 3
2343         #vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2344
2345         vpaddq          $H2,$T2,$H2             # accumulate input
2346         sub             \$192,$len
2347         jbe             .Ltail_avx512
2348         #jmp            .Loop_avx512
2349
2350 .align  32
2351 .Loop_avx512:
2352         ################################################################
2353         # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2354         # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2355         # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2356         # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2357         # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2358         # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2359         # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2360         # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2361         #   \________/\___________/
2362         ################################################################
2363         #vpaddq         $H2,$T2,$H2             # accumulate input
2364
2365         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
2366         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
2367         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
2368         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
2369         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2370         #
2371         # however, as h2 is "chronologically" first one available pull
2372         # corresponding operations up, so it's
2373         #
2374         # d3 = h2*r1   + h0*r3 + h1*r2   + h3*r0 + h4*5*r4
2375         # d4 = h2*r2   + h0*r4 + h1*r3   + h3*r1 + h4*r0
2376         # d0 = h2*5*r3 + h0*r0 + h1*5*r4         + h3*5*r2 + h4*5*r1
2377         # d1 = h2*5*r4 + h0*r1           + h1*r0 + h3*5*r3 + h4*5*r2
2378         # d2 = h2*r0           + h0*r2   + h1*r1 + h3*5*r4 + h4*5*r3
2379
2380         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2381          vpaddq         $H0,$T0,$H0
2382         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2383          vpandq         $MASK,$T1,$T1           # 1
2384         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2385          vpandq         $MASK,$T3,$T3           # 3
2386         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2387          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2388         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2389          vpaddq         $H1,$T1,$H1             # accumulate input
2390          vpaddq         $H3,$T3,$H3
2391          vpaddq         $H4,$T4,$H4
2392
2393           vmovdqu64     16*0($inp),$T3          # load input
2394           vmovdqu64     16*4($inp),$T4
2395           lea           16*8($inp),$inp
2396         vpmuludq        $H0,$R3,$M3
2397         vpmuludq        $H0,$R4,$M4
2398         vpmuludq        $H0,$R0,$M0
2399         vpmuludq        $H0,$R1,$M1
2400         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2401         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2402         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2403         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2404
2405         vpmuludq        $H1,$R2,$M3
2406         vpmuludq        $H1,$R3,$M4
2407         vpmuludq        $H1,$S4,$M0
2408         vpmuludq        $H0,$R2,$M2
2409         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2410         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2411         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2412         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2413
2414           vpunpcklqdq   $T4,$T3,$T0             # transpose input
2415           vpunpckhqdq   $T4,$T3,$T4
2416
2417         vpmuludq        $H3,$R0,$M3
2418         vpmuludq        $H3,$R1,$M4
2419         vpmuludq        $H1,$R0,$M1
2420         vpmuludq        $H1,$R1,$M2
2421         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2422         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2423         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2424         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2425
2426         vpmuludq        $H4,$S4,$M3
2427         vpmuludq        $H4,$R0,$M4
2428         vpmuludq        $H3,$S2,$M0
2429         vpmuludq        $H3,$S3,$M1
2430         vpaddq          $M3,$D3,$D3             # d3 += h4*s4
2431         vpmuludq        $H3,$S4,$M2
2432         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2433         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2434         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2435         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2436
2437         vpmuludq        $H4,$S1,$M0
2438         vpmuludq        $H4,$S2,$M1
2439         vpmuludq        $H4,$S3,$M2
2440         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2441         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2442         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2443
2444         ################################################################
2445         # lazy reduction (interleaved with input splat)
2446
2447          vpsrlq         \$52,$T0,$T2            # splat input
2448          vpsllq         \$12,$T4,$T3
2449
2450         vpsrlq          \$26,$D3,$H3
2451         vpandq          $MASK,$D3,$D3
2452         vpaddq          $H3,$D4,$H4             # h3 -> h4
2453
2454          vporq          $T3,$T2,$T2
2455
2456         vpsrlq          \$26,$H0,$D0
2457         vpandq          $MASK,$H0,$H0
2458         vpaddq          $D0,$H1,$H1             # h0 -> h1
2459
2460          vpandq         $MASK,$T2,$T2           # 2
2461
2462         vpsrlq          \$26,$H4,$D4
2463         vpandq          $MASK,$H4,$H4
2464
2465         vpsrlq          \$26,$H1,$D1
2466         vpandq          $MASK,$H1,$H1
2467         vpaddq          $D1,$H2,$H2             # h1 -> h2
2468
2469         vpaddq          $D4,$H0,$H0
2470         vpsllq          \$2,$D4,$D4
2471         vpaddq          $D4,$H0,$H0             # h4 -> h0
2472
2473          vpaddq         $T2,$H2,$H2             # modulo-scheduled
2474          vpsrlq         \$26,$T0,$T1
2475
2476         vpsrlq          \$26,$H2,$D2
2477         vpandq          $MASK,$H2,$H2
2478         vpaddq          $D2,$D3,$H3             # h2 -> h3
2479
2480          vpsrlq         \$14,$T4,$T3
2481
2482         vpsrlq          \$26,$H0,$D0
2483         vpandq          $MASK,$H0,$H0
2484         vpaddq          $D0,$H1,$H1             # h0 -> h1
2485
2486          vpsrlq         \$40,$T4,$T4            # 4
2487
2488         vpsrlq          \$26,$H3,$D3
2489         vpandq          $MASK,$H3,$H3
2490         vpaddq          $D3,$H4,$H4             # h3 -> h4
2491
2492          vpandq         $MASK,$T0,$T0           # 0
2493          #vpandq        $MASK,$T1,$T1           # 1
2494          #vpandq        $MASK,$T3,$T3           # 3
2495          #vporq         $PADBIT,$T4,$T4         # padbit, yes, always
2496
2497         sub             \$128,$len
2498         ja              .Loop_avx512
2499
2500 .Ltail_avx512:
2501         ################################################################
2502         # while the above multiplications were by r^8 in all lanes, in the
2503         # last iteration we multiply the least significant lane by r^8 and
2504         # the most significant one by r, which is why the table gets shifted...
2505
2506         vpsrlq          \$32,$R0,$R0            # 0105020603070408
2507         vpsrlq          \$32,$R1,$R1
2508         vpsrlq          \$32,$R2,$R2
2509         vpsrlq          \$32,$S3,$S3
2510         vpsrlq          \$32,$S4,$S4
2511         vpsrlq          \$32,$R3,$R3
2512         vpsrlq          \$32,$R4,$R4
2513         vpsrlq          \$32,$S1,$S1
2514         vpsrlq          \$32,$S2,$S2
2515
2516         ################################################################
2517         # load either next or last 64 bytes of input
2518         lea             ($inp,$len),$inp
2519
2520         #vpaddq         $H2,$T2,$H2             # accumulate input
2521         vpaddq          $H0,$T0,$H0
2522
2523         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2524         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2525         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2526          vpandq         $MASK,$T1,$T1           # 1
2527         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2528          vpandq         $MASK,$T3,$T3           # 3
2529         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2530          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2531          vpaddq         $H1,$T1,$H1             # accumulate input
2532          vpaddq         $H3,$T3,$H3
2533          vpaddq         $H4,$T4,$H4
2534
2535           vmovdqu64     16*0($inp),%x#$T0
2536         vpmuludq        $H0,$R3,$M3
2537         vpmuludq        $H0,$R4,$M4
2538         vpmuludq        $H0,$R0,$M0
2539         vpmuludq        $H0,$R1,$M1
2540         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2541         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2542         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2543         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2544
2545           vmovdqu64     16*1($inp),%x#$T1
2546         vpmuludq        $H1,$R2,$M3
2547         vpmuludq        $H1,$R3,$M4
2548         vpmuludq        $H1,$S4,$M0
2549         vpmuludq        $H0,$R2,$M2
2550         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2551         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2552         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2553         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2554
2555           vinserti64x2  \$1,16*2($inp),$T0,$T0
2556         vpmuludq        $H3,$R0,$M3
2557         vpmuludq        $H3,$R1,$M4
2558         vpmuludq        $H1,$R0,$M1
2559         vpmuludq        $H1,$R1,$M2
2560         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2561         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2562         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2563         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2564
2565           vinserti64x2  \$1,16*3($inp),$T1,$T1
2566         vpmuludq        $H4,$S4,$M3
2567         vpmuludq        $H4,$R0,$M4
2568         vpmuludq        $H3,$S2,$M0
2569         vpmuludq        $H3,$S3,$M1
2570         vpmuludq        $H3,$S4,$M2
2571         vpaddq          $M3,$D3,$H3             # h3 = d3 + h4*s4
2572         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2573         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2574         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2575         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2576
2577         vpmuludq        $H4,$S1,$M0
2578         vpmuludq        $H4,$S2,$M1
2579         vpmuludq        $H4,$S3,$M2
2580         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2581         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2582         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2583
2584         ################################################################
2585         # horizontal addition
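        #
        # (fold each %zmm's eight 64-bit lanes down to one: add odd qwords
        # into even ones, fold 128-bit lanes, then add the upper 256-bit
        # half; mask %k3 keeps only the lowest qword of each sum)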
2586
2587         mov             \$1,%eax
2588         vpsrldq         \$8,$H3,$D3
2589         vpsrldq         \$8,$D4,$H4
2590         vpsrldq         \$8,$H0,$D0
2591         vpsrldq         \$8,$H1,$D1
2592         vpsrldq         \$8,$H2,$D2
2593         vpaddq          $D3,$H3,$H3
2594         vpaddq          $D4,$H4,$H4
2595         vpaddq          $D0,$H0,$H0
2596         vpaddq          $D1,$H1,$H1
2597         vpaddq          $D2,$H2,$H2
2598
2599         kmovw           %eax,%k3
2600         vpermq          \$0x2,$H3,$D3
2601         vpermq          \$0x2,$H4,$D4
2602         vpermq          \$0x2,$H0,$D0
2603         vpermq          \$0x2,$H1,$D1
2604         vpermq          \$0x2,$H2,$D2
2605         vpaddq          $D3,$H3,$H3
2606         vpaddq          $D4,$H4,$H4
2607         vpaddq          $D0,$H0,$H0
2608         vpaddq          $D1,$H1,$H1
2609         vpaddq          $D2,$H2,$H2
2610
2611         vextracti64x4   \$0x1,$H3,%y#$D3
2612         vextracti64x4   \$0x1,$H4,%y#$D4
2613         vextracti64x4   \$0x1,$H0,%y#$D0
2614         vextracti64x4   \$0x1,$H1,%y#$D1
2615         vextracti64x4   \$0x1,$H2,%y#$D2
2616         vpaddq          $D3,$H3,${H3}{%k3}{z}   # keep single qword in case
2617         vpaddq          $D4,$H4,${H4}{%k3}{z}   # it's passed to .Ltail_avx2
2618         vpaddq          $D0,$H0,${H0}{%k3}{z}
2619         vpaddq          $D1,$H1,${H1}{%k3}{z}
2620         vpaddq          $D2,$H2,${H2}{%k3}{z}
2621 ___
2622 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2623 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2624 $code.=<<___;
2625         ################################################################
2626         # lazy reduction (interleaved with input splat)
2627
2628         vpsrlq          \$26,$H3,$D3
2629         vpandq          $MASK,$H3,$H3
2630          vpsrldq        \$6,$T0,$T2             # splat input
2631          vpsrldq        \$6,$T1,$T3
2632          vpunpckhqdq    $T1,$T0,$T4             # 4
2633         vpaddq          $D3,$H4,$H4             # h3 -> h4
2634
2635         vpsrlq          \$26,$H0,$D0
2636         vpandq          $MASK,$H0,$H0
2637          vpunpcklqdq    $T3,$T2,$T2             # 2:3
2638          vpunpcklqdq    $T1,$T0,$T0             # 0:1
2639         vpaddq          $D0,$H1,$H1             # h0 -> h1
2640
2641         vpsrlq          \$26,$H4,$D4
2642         vpandq          $MASK,$H4,$H4
2643
2644         vpsrlq          \$26,$H1,$D1
2645         vpandq          $MASK,$H1,$H1
2646          vpsrlq         \$30,$T2,$T3
2647          vpsrlq         \$4,$T2,$T2
2648         vpaddq          $D1,$H2,$H2             # h1 -> h2
2649
2650         vpaddq          $D4,$H0,$H0
2651         vpsllq          \$2,$D4,$D4
2652          vpsrlq         \$26,$T0,$T1
2653          vpsrlq         \$40,$T4,$T4            # 4
2654         vpaddq          $D4,$H0,$H0             # h4 -> h0
2655
2656         vpsrlq          \$26,$H2,$D2
2657         vpandq          $MASK,$H2,$H2
2658          vpandq         $MASK,$T2,$T2           # 2
2659          vpandq         $MASK,$T0,$T0           # 0
2660         vpaddq          $D2,$H3,$H3             # h2 -> h3
2661
2662         vpsrlq          \$26,$H0,$D0
2663         vpandq          $MASK,$H0,$H0
2664          vpaddq         $H2,$T2,$H2             # accumulate input for .Ltail_avx2
2665          vpandq         $MASK,$T1,$T1           # 1
2666         vpaddq          $D0,$H1,$H1             # h0 -> h1
2667
2668         vpsrlq          \$26,$H3,$D3
2669         vpandq          $MASK,$H3,$H3
2670          vpandq         $MASK,$T3,$T3           # 3
2671          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2672         vpaddq          $D3,$H4,$H4             # h3 -> h4
2673
2674         lea             0x90(%rsp),%rax         # size optimization for .Ltail_avx2
2675         add             \$64,$len
2676         jnz             .Ltail_avx2
2677
2678         vpsubq          $T2,$H2,$H2             # undo input accumulation
2679         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2680         vmovd           %x#$H1,`4*1-48-64`($ctx)
2681         vmovd           %x#$H2,`4*2-48-64`($ctx)
2682         vmovd           %x#$H3,`4*3-48-64`($ctx)
2683         vmovd           %x#$H4,`4*4-48-64`($ctx)
2684         vzeroall
2685 ___
2686 $code.=<<___    if ($win64);
2687         movdqa          0x50(%r11),%xmm6
2688         movdqa          0x60(%r11),%xmm7
2689         movdqa          0x70(%r11),%xmm8
2690         movdqa          0x80(%r11),%xmm9
2691         movdqa          0x90(%r11),%xmm10
2692         movdqa          0xa0(%r11),%xmm11
2693         movdqa          0xb0(%r11),%xmm12
2694         movdqa          0xc0(%r11),%xmm13
2695         movdqa          0xd0(%r11),%xmm14
2696         movdqa          0xe0(%r11),%xmm15
2697         lea             0xf8(%r11),%rsp
2698 .Ldo_avx512_epilogue:
2699 ___
2700 $code.=<<___    if (!$win64);
2701         lea             8(%r11),%rsp
2702 .cfi_def_cfa            %rsp,8
2703 ___
2704 $code.=<<___;
2705         ret
2706 .cfi_endproc
2707 .size   poly1305_blocks_avx512,.-poly1305_blocks_avx512
2708 ___
2709 if ($avx>3) {
2710 ########################################################################
2711 # VPMADD52 version using 2^44 radix.
2712 #
2713 # One can argue that base 2^52 would be more natural. Well, even though
2714 # some operations would be more natural, one has to recognize a couple of
2715 # things. Base 2^52 doesn't provide an advantage over base 2^44 if you look
2716 # at the amount of multiply-n-accumulate operations. Secondly, it makes it
2717 # impossible to pre-compute multiples of 5 [referred to as s[]/sN in
2718 # reference implementations], which means that more such operations
2719 # would have to be performed in the inner loop, which in turn makes the
2720 # critical path longer. In other words, even though base 2^44 reduction
2721 # might look less elegant, the overall critical path is actually shorter...
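#
# (For reference: VPMADD52LUQ multiplies the low 52 bits of each 64-bit
# lane and accumulates the low 52 bits of the 104-bit product into the
# destination, while VPMADD52HUQ accumulates the high 52 bits; this pair
# is what makes a sub-64-bit radix such as 2^44 practical here.)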
2722
2723 ########################################################################
2724 # The layout of the opaque area is as follows.
2725 #
2726 #       unsigned __int64 h[3];          # current hash value base 2^44
2727 #       unsigned __int64 s[2];          # key value*20 base 2^44
2728 #       unsigned __int64 r[3];          # key value base 2^44
2729 #       struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
2730 #                                       # r^n positions reflect
2731 #                                       # placement in register, not
2732 #                                       # memory, R[3] is R[1]*20
2733
$code.=<<___;
.type	poly1305_init_base2_44,\@function,3
.align	32
poly1305_init_base2_44:
	xor	%rax,%rax
	mov	%rax,0($ctx)		# initialize hash value
	mov	%rax,8($ctx)
	mov	%rax,16($ctx)

.Linit_base2_44:
	lea	poly1305_blocks_vpmadd52(%rip),%r10
	lea	poly1305_emit_base2_44(%rip),%r11

	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
	and	0($inp),%rax
	mov	\$0x00000fffffffffff,%r8
	and	8($inp),%rcx
	mov	\$0x00000fffffffffff,%r9
	and	%rax,%r8
	shrd	\$44,%rcx,%rax
	mov	%r8,40($ctx)		# r0
	and	%r9,%rax
	shr	\$24,%rcx
	mov	%rax,48($ctx)		# r1
	lea	(%rax,%rax,4),%rax	# *5
	mov	%rcx,56($ctx)		# r2
	shl	\$2,%rax		# magic <<2
	lea	(%rcx,%rcx,4),%rcx	# *5
	shl	\$2,%rcx		# magic <<2
	mov	%rax,24($ctx)		# s1
	mov	%rcx,32($ctx)		# s2
	movq	\$-1,64($ctx)		# write impossible value
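	# (bit 63 of a genuine base 2^44 limb is never set, so a
	# negative qword at 64($ctx) marks the key powers as not yet
	# computed; poly1305_blocks_vpmadd52 tests exactly this)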
___
$code.=<<___	if ($flavour !~ /elf32/);
	mov	%r10,0(%rdx)
	mov	%r11,8(%rdx)
___
$code.=<<___	if ($flavour =~ /elf32/);
	mov	%r10d,0(%rdx)
	mov	%r11d,4(%rdx)
___
$code.=<<___;
	mov	\$1,%eax
	ret
.size	poly1305_init_base2_44,.-poly1305_init_base2_44
___
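########################################################################
# For reference, the clamping and base 2^44 splitting above amount to
# the following (a plain-Perl sketch assuming a 64-bit perl and the
# 16-byte key in a hypothetical $key string; kept as a comment, so it
# has no effect on the generated code):
#
#	my ($lo,$hi) = unpack("Q<2",$key);	# two little-endian qwords
#	$lo &= 0x0ffffffc0fffffff;		# clamp r
#	$hi &= 0x0ffffffc0ffffffc;
#	my $mask44 = (1<<44)-1;
#	my $r0 = $lo & $mask44;
#	my $r1 = ($lo>>44 | ($hi & 0xffffff)<<20) & $mask44;
#	my $r2 = $hi>>24;			# remaining 36 bits
#	my ($s1,$s2) = (20*$r1,20*$r2);		# stored at 24 and 32($ctx)
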
{
my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));

$code.=<<___;
.type	poly1305_blocks_vpmadd52,\@function,4
.align	32
poly1305_blocks_vpmadd52:
	shr	\$4,$len
	jz	.Lno_data_vpmadd52		# too short

	shl	\$40,$padbit
	mov	64($ctx),%r8			# peek at the power of the key

	# If the powers of the key have not been calculated yet, process
	# up to 3 blocks with this single-block subroutine; otherwise
	# ensure that the length is a multiple of 2 blocks and pass the
	# rest down to the next subroutine...

	mov	\$3,%rax
	mov	\$1,%r10
	cmp	\$4,$len			# is input long enough?
	cmovae	%r10,%rax
	test	%r8,%r8				# is power value impossible?
	cmovns	%r10,%rax

	and	$len,%rax			# is input of favourable length?
	jz	.Lblocks_vpmadd52_4x
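
	# (e.g. for $len=7 blocks with valid key powers %rax ends up
	# 1&7 = 1, so one block is processed here, leaving an even
	# count of 6 for the multi-block code below; for $len<4 with
	# no powers computed yet, %rax = 3&$len and everything is
	# handled here)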

	sub		%rax,$len
	mov		\$7,%r10d
	mov		\$1,%r11d
	kmovw		%r10d,%k7
	lea		.L2_44_inp_permd(%rip),%r10
	kmovw		%r11d,%k1

	vmovq		$padbit,%x#$PAD
	vmovdqa64	0(%r10),$inp_permd	# .L2_44_inp_permd
	vmovdqa64	32(%r10),$inp_shift	# .L2_44_inp_shift
	vpermq		\$0xcf,$PAD,$PAD
	vmovdqa64	64(%r10),$reduc_mask	# .L2_44_mask

	vmovdqu64	0($ctx),${Dlo}{%k7}{z}		# load hash value
	vmovdqu64	40($ctx),${r2r1r0}{%k7}{z}	# load keys
	vmovdqu64	32($ctx),${r1r0s2}{%k7}{z}
	vmovdqu64	24($ctx),${r0s2s1}{%k7}{z}

	vmovdqa64	96(%r10),$reduc_rght	# .L2_44_shift_rgt
	vmovdqa64	128(%r10),$reduc_left	# .L2_44_shift_lft

	jmp		.Loop_vpmadd52

.align	32
.Loop_vpmadd52:
	vmovdqu32	0($inp),%x#$T0		# load input as ----3210
	lea		16($inp),$inp

	vpermd		$T0,$inp_permd,$T0	# ----3210 -> --322110
	vpsrlvq		$inp_shift,$T0,$T0
	vpandq		$reduc_mask,$T0,$T0
	vporq		$PAD,$T0,$T0

	vpaddq		$T0,$Dlo,$Dlo		# accumulate input

	vpermq		\$0,$Dlo,${H0}{%k7}{z}	# smash hash value
	vpermq		\$0b01010101,$Dlo,${H1}{%k7}{z}
	vpermq		\$0b10101010,$Dlo,${H2}{%k7}{z}

	vpxord		$Dlo,$Dlo,$Dlo
	vpxord		$Dhi,$Dhi,$Dhi

	vpmadd52luq	$r2r1r0,$H0,$Dlo
	vpmadd52huq	$r2r1r0,$H0,$Dhi

	vpmadd52luq	$r1r0s2,$H1,$Dlo
	vpmadd52huq	$r1r0s2,$H1,$Dhi

	vpmadd52luq	$r0s2s1,$H2,$Dlo
	vpmadd52huq	$r0s2s1,$H2,$Dhi

	vpsrlvq		$reduc_rght,$Dlo,$T0	# 0 in topmost qword
	vpsllvq		$reduc_left,$Dhi,$Dhi	# 0 in topmost qword
	vpandq		$reduc_mask,$Dlo,$Dlo

	vpaddq		$T0,$Dhi,$Dhi

	vpermq		\$0b10010011,$Dhi,$Dhi	# 0 in lowest qword

	vpaddq		$Dhi,$Dlo,$Dlo		# note topmost qword :-)

	vpsrlvq		$reduc_rght,$Dlo,$T0	# 0 in topmost qword
	vpandq		$reduc_mask,$Dlo,$Dlo

	vpermq		\$0b10010011,$T0,$T0

	vpaddq		$T0,$Dlo,$Dlo

	vpermq		\$0b10010011,$Dlo,${T0}{%k1}{z}

	vpaddq		$T0,$Dlo,$Dlo
	vpsllq		\$2,$T0,$T0

	vpaddq		$T0,$Dlo,$Dlo

	dec		%rax			# len-=16
	jnz		.Loop_vpmadd52

	vmovdqu64	$Dlo,0($ctx){%k7}	# store hash value

	test		$len,$len
	jnz		.Lblocks_vpmadd52_4x

.Lno_data_vpmadd52:
	ret
.size	poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
___
}
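########################################################################
# Per block, the loop above amounts to the following scalar operations,
# modulo the exact carry scheduling (a Perl sketch with $h0..$h2,
# $r0..$r2, $s1 and $s2 held as Math::BigInt objects, since the
# products exceed 64 bits; illustrative only, kept as a comment):
#
#	use Math::BigInt;
#	my ($mask44,$mask42) = ((1<<44)-1,(1<<42)-1);
#	$h0 += $lo & $mask44;			# accumulate input block
#	$h1 += ($lo>>44 | ($hi & 0xffffff)<<20) & $mask44;
#	$h2 += ($hi>>24) + (1<<40);		# pad bit at 2^128
#	my $d0 = $h0*$r0 + $h1*$s2 + $h2*$s1;	# column-wise multiply by
#	my $d1 = $h0*$r1 + $h1*$r0 + $h2*$s2;	# {r2,r1,r0}, {r1,r0,s2}
#	my $d2 = $h0*$r2 + $h1*$r1 + $h2*$r0;	# and {r0,s2,s1}
#	$d1 += $d0>>44;				# partial reduction
#	$d2 += $d1>>44;
#	my $c = $d2>>42;
#	($h0,$h1,$h2) = (($d0 & $mask44) + 5*$c,
#			 $d1 & $mask44, $d2 & $mask42);
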
{
########################################################################
# As implied by its name, the 4x subroutine processes 4 blocks in
# parallel (though it also handles lengths of 4*n+2 blocks). It takes
# powers of the key up to the 4th and operates on 256-bit %ymm
# registers.

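########################################################################
# The underlying idea is the standard interleaving one: the Horner
# evaluation
#
#	(...((m1*r + m2)*r + m3)*r + ...)*r
#
# is re-grouped so that four accumulators each advance by a factor of
# r^4 per iteration, and a final pass multiplies them by r^4, r^3, r^2
# and r^1 respectively before summing, which is why powers of the key
# up to the 4th are required.
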
my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));

$code.=<<___;
.type	poly1305_blocks_vpmadd52_4x,\@function,4
.align	32
poly1305_blocks_vpmadd52_4x:
	shr	\$4,$len
	jz	.Lno_data_vpmadd52_4x		# too short

	shl	\$40,$padbit
	mov	64($ctx),%r8			# peek at the power of the key

.Lblocks_vpmadd52_4x:
	vpbroadcastq	$padbit,$PAD

	vmovdqa64	.Lx_mask44(%rip),$mask44
	mov		\$5,%eax
	vmovdqa64	.Lx_mask42(%rip),$mask42
	kmovw		%eax,%k1		# used in 2x path

	test		%r8,%r8			# is power value impossible?
	js		.Linit_vpmadd52		# if it is, then init R[4]

	vmovq		0($ctx),%x#$H0		# load current hash value
	vmovq		8($ctx),%x#$H1
	vmovq		16($ctx),%x#$H2

	test		\$3,$len		# is length 4*n+2?
	jnz		.Lblocks_vpmadd52_2x_do

.Lblocks_vpmadd52_4x_do:
	vpbroadcastq	64($ctx),$R0		# load 4th power of the key
	vpbroadcastq	96($ctx),$R1
	vpbroadcastq	128($ctx),$R2
	vpbroadcastq	160($ctx),$S1

.Lblocks_vpmadd52_4x_key_loaded:
	vpsllq		\$2,$R2,$S2		# S2 = R2*5*4
	vpaddq		$R2,$S2,$S2
	vpsllq		\$2,$S2,$S2

	test		\$7,$len		# is len 8*n?
	jz		.Lblocks_vpmadd52_8x

	vmovdqu64	16*0($inp),$T2		# load data
	vmovdqu64	16*2($inp),$T3
	lea		16*4($inp),$inp

	vpunpcklqdq	$T3,$T2,$T1		# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as 3-1-2-0

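	# each 16-byte block, viewed as two qwords (lo,hi), is split
	# into base 2^44 limbs below: t2 = hi>>24 | padbit<<40,
	# t0 = lo & mask44, t1 = (lo>>44 | hi<<20) & mask44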
	vpsrlq		\$24,$T3,$T2		# splat the data
	vporq		$PAD,$T2,$T2
	 vpaddq		$T2,$H2,$H2		# accumulate input
	vpandq		$mask44,$T1,$T0
	vpsrlq		\$44,$T1,$T1
	vpsllq		\$20,$T3,$T3
	vporq		$T3,$T1,$T1
	vpandq		$mask44,$T1,$T1

	sub		\$4,$len
	jz		.Ltail_vpmadd52_4x
	jmp		.Loop_vpmadd52_4x
	ud2

.align	32
.Linit_vpmadd52:
	vmovq		24($ctx),%x#$S1		# load key
	vmovq		56($ctx),%x#$H2
	vmovq		32($ctx),%x#$S2
	vmovq		40($ctx),%x#$R0
	vmovq		48($ctx),%x#$R1

	vmovdqa		$R0,$H0
	vmovdqa		$R1,$H1
	vmovdqa		$H2,$R2

	mov		\$2,%eax

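	# the loop below runs twice: the first pass squares the key to
	# obtain r^2, the second multiplies the (r,r^2) pairs to obtain
	# r^3 and r^4, which are then merged and permuted into the
	# r^1,r^3,r^2,r^4 register layout described above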
.Lmul_init_vpmadd52:
	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi

	################################################################
	# partial reduction
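	# (vpmadd52luq accumulates bits 0..51 and vpmadd52huq bits
	# 52..103 of each 52x52-bit product, so the carry out of a
	# 44-bit limb is (lo>>44) + (hi<<8), and out of the 42-bit
	# one (lo>>42) + (hi<<10), hence the shift amounts below)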
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$H0
	vpaddq		$tmp,$D0hi,$D0hi

	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$H1
	vpaddq		$tmp,$D1hi,$D1hi

	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$H2
	vpaddq		$tmp,$D2hi,$D2hi

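	# fold the carry out of the 2^130 limb back in multiplied by 5,
	# i.e. h0 += c + 4*c, since 2^130 == 5 (mod 2^130-5)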
	vpaddq		$D2hi,$H0,$H0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0

	vpsrlq		\$44,$H0,$tmp		# additional step
	vpandq		$mask44,$H0,$H0

	vpaddq		$tmp,$H1,$H1

	dec		%eax
	jz		.Ldone_init_vpmadd52

	vpunpcklqdq	$R1,$H1,$R1		# 1,2
	vpbroadcastq	%x#$H1,%x#$H1		# 2,2
	vpunpcklqdq	$R2,$H2,$R2
	vpbroadcastq	%x#$H2,%x#$H2
	vpunpcklqdq	$R0,$H0,$R0
	vpbroadcastq	%x#$H0,%x#$H0

	vpsllq		\$2,$R1,$S1		# S1 = R1*5*4
	vpsllq		\$2,$R2,$S2		# S2 = R2*5*4
	vpaddq		$R1,$S1,$S1
	vpaddq		$R2,$S2,$S2
	vpsllq		\$2,$S1,$S1
	vpsllq		\$2,$S2,$S2

	jmp		.Lmul_init_vpmadd52
	ud2

.align	32
.Ldone_init_vpmadd52:
	vinserti128	\$1,%x#$R1,$H1,$R1	# 1,2,3,4
	vinserti128	\$1,%x#$R2,$H2,$R2
	vinserti128	\$1,%x#$R0,$H0,$R0

	vpermq		\$0b11011000,$R1,$R1	# 1,3,2,4
	vpermq		\$0b11011000,$R2,$R2
	vpermq		\$0b11011000,$R0,$R0

	vpsllq		\$2,$R1,$S1		# S1 = R1*5*4
	vpaddq		$R1,$S1,$S1
	vpsllq		\$2,$S1,$S1

	vmovq		0($ctx),%x#$H0		# load current hash value
	vmovq		8($ctx),%x#$H1
	vmovq		16($ctx),%x#$H2