1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # This module implements Poly1305 hash for x86_64.
18 #
19 # March 2015
20 #
21 # Initial release.
22 #
23 # December 2016
24 #
25 # Add AVX512F+VL+BW code path.
26 #
27 # November 2017
28 #
29 # Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be
30 # executed even on Knights Landing. The trigger for the modification was
31 # the observation that AVX512 code paths can negatively affect overall
32 # Skylake-X system performance. Since we are likely to suppress the
33 # AVX512F capability flag [at least on Skylake-X], the conversion serves
34 # as a kind of "investment protection". Note that the next *lake processor,
35 # Cannon Lake, has an AVX512IFMA code path to execute...
36 #
37 # Numbers are cycles per processed byte with poly1305_blocks alone,
38 # measured with rdtsc at fixed clock frequency.
39 #
40 #               IALU/gcc-4.8(*) AVX(**)         AVX2    AVX-512
41 # P4            4.46/+120%      -
42 # Core 2        2.41/+90%       -
43 # Westmere      1.88/+120%      -
44 # Sandy Bridge  1.39/+140%      1.10
45 # Haswell       1.14/+175%      1.11            0.65
46 # Skylake[-X]   1.13/+120%      0.96            0.51    [0.35]
47 # Silvermont    2.83/+95%       -
48 # Knights L     3.60/?          1.65            1.10    ?
49 # Goldmont      1.70/+180%      -
50 # VIA Nano      1.82/+150%      -
51 # Sledgehammer  1.38/+160%      -
52 # Bulldozer     2.30/+130%      0.97
53 # Ryzen         1.15/+200%      1.08            1.18
54 #
55 # (*)   improvement coefficients relative to clang are more modest and
56 #       are ~50% on most processors; in both cases we are comparing to
57 #       __int128 code;
58 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
59 #       it was faster than integer-only code only on older Intel P4 and
60 #       Core processors, by 30-50%, with the gain shrinking on newer parts;
61 #       on contemporary processors it is slower, e.g. almost 2x slower on
62 #       Atom, and as the former are naturally disappearing, SSE2 is deemed unnecessary;
63
64 $flavour = shift;
65 $output  = shift;
66 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
67
68 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
69
70 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
71 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
72 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
73 die "can't locate x86_64-xlate.pl";
74
75 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
76                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
77         $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26);
78 }
79
80 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
81            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
82         $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12);
83         $avx += 2 if ($1==2.11 && $2>=8);
84 }
85
86 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
87            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
88         $avx = ($1>=10) + ($1>=12);
89 }
90
91 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
92         $avx = ($2>=3.0) + ($2>3.0);
93 }
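# Note: the $avx level chosen above only gates which code paths are emitted;
# whether the AVX, AVX2 or AVX-512 paths actually run is decided at run time
# in poly1305_init via OPENSSL_ia32cap_P. Roughly: 0 - integer-only, 1 - AVX,
# 2 - AVX2, higher values - the AVX-512 paths further down.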
94
95 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
96 *STDOUT=*OUT;
97
98 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
99 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
100 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
101 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
102
103 sub poly1305_iteration {
104 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
105 # output:       $h0-$h2 *= $r0-$r1
106 $code.=<<___;
107         mulq    $h0                     # h0*r1
108         mov     %rax,$d2
109          mov    $r0,%rax
110         mov     %rdx,$d3
111
112         mulq    $h0                     # h0*r0
113         mov     %rax,$h0                # future $h0
114          mov    $r0,%rax
115         mov     %rdx,$d1
116
117         mulq    $h1                     # h1*r0
118         add     %rax,$d2
119          mov    $s1,%rax
120         adc     %rdx,$d3
121
122         mulq    $h1                     # h1*s1
123          mov    $h2,$h1                 # borrow $h1
124         add     %rax,$h0
125         adc     %rdx,$d1
126
127         imulq   $s1,$h1                 # h2*s1
128         add     $h1,$d2
129          mov    $d1,$h1
130         adc     \$0,$d3
131
132         imulq   $r0,$h2                 # h2*r0
133         add     $d2,$h1
134         mov     \$-4,%rax               # mask value
135         adc     $h2,$d3
136
137         and     $d3,%rax                # last reduction step
138         mov     $d3,$h2
139         shr     \$2,$d3
140         and     \$3,$h2
141         add     $d3,%rax
142         add     %rax,$h0
143         adc     \$0,$h1
144         adc     \$0,$h2
145 ___
146 }
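# A cross-check sketch (illustrative only, never called, not part of the
# generated code): one call of poly1305_iteration amounts to h = h*r modulo
# 2^130-5, except that the assembly leaves h only partially reduced - the
# "last reduction step" folds the part of the product above bit 130 back in
# as *5 = *4 + *1, hence the -4 mask and the shift by 2. A fully reduced
# Math::BigInt equivalent, assuming a clamped 128-bit r, would be:
sub poly1305_mul_ref {
	use Math::BigInt;
	my ($h, $r) = @_;			# Math::BigInt values
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);	# 2^130-5
	return $h->copy()->bmul($r)->bmod($p);
}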
147
148 ########################################################################
149 # The layout of the opaque area is as follows.
150 #
151 #       unsigned __int64 h[3];          # current hash value base 2^64
152 #       unsigned __int64 r[2];          # key value base 2^64
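#
# An offset sketch (illustrative only; assumes a 64-bit perl with the "Q<"
# pack template): the five quadwords sit back to back, h[0..2] at byte
# offsets 0/8/16 and r[0..1] at 24/32, exactly as addressed by the code below.
sub poly1305_ctx_peek_ref {
	my ($blob) = @_;			# raw copy of the opaque area
	my ($h0,$h1,$h2,$r0,$r1) = unpack("Q<5",$blob);
	return ([$h0,$h1,$h2], [$r0,$r1]);
}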
153
154 $code.=<<___;
155 .text
156
157 .extern OPENSSL_ia32cap_P
158
159 .globl  poly1305_init
160 .hidden poly1305_init
161 .globl  poly1305_blocks
162 .hidden poly1305_blocks
163 .globl  poly1305_emit
164 .hidden poly1305_emit
165
166 .type   poly1305_init,\@function,3
167 .align  32
168 poly1305_init:
169         xor     %rax,%rax
170         mov     %rax,0($ctx)            # initialize hash value
171         mov     %rax,8($ctx)
172         mov     %rax,16($ctx)
173
174         cmp     \$0,$inp
175         je      .Lno_key
176
177         lea     poly1305_blocks(%rip),%r10
178         lea     poly1305_emit(%rip),%r11
179 ___
180 $code.=<<___    if ($avx);
181         mov     OPENSSL_ia32cap_P+4(%rip),%r9
182         lea     poly1305_blocks_avx(%rip),%rax
183         lea     poly1305_emit_avx(%rip),%rcx
184         bt      \$`60-32`,%r9           # AVX?
185         cmovc   %rax,%r10
186         cmovc   %rcx,%r11
187 ___
188 $code.=<<___    if ($avx>1);
189         lea     poly1305_blocks_avx2(%rip),%rax
190         bt      \$`5+32`,%r9            # AVX2?
191         cmovc   %rax,%r10
192 ___
193 $code.=<<___    if ($avx>3);
194         mov     \$`(1<<31|1<<21|1<<16)`,%rax
195         shr     \$32,%r9
196         and     %rax,%r9
197         cmp     %rax,%r9
198         je      .Linit_base2_44
199 ___
200 $code.=<<___;
201         mov     \$0x0ffffffc0fffffff,%rax
202         mov     \$0x0ffffffc0ffffffc,%rcx
203         and     0($inp),%rax
204         and     8($inp),%rcx
205         mov     %rax,24($ctx)
206         mov     %rcx,32($ctx)
207 ___
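# A clamp sketch (illustrative only; 64-bit perl assumed): the two masks
# above are the standard Poly1305 clamping of r, i.e.
# r &= 0x0ffffffc0ffffffc0ffffffc0fffffff, clearing the top 4 bits of every
# 32-bit word and the low 2 bits of the upper three words.
sub poly1305_clamp_ref {
	my ($k0, $k1) = @_;			# low/high 64-bit halves of the 16-byte key
	return ($k0 & 0x0ffffffc0fffffff, $k1 & 0x0ffffffc0ffffffc);
}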
208 $code.=<<___    if ($flavour !~ /elf32/);
209         mov     %r10,0(%rdx)
210         mov     %r11,8(%rdx)
211 ___
212 $code.=<<___    if ($flavour =~ /elf32/);
213         mov     %r10d,0(%rdx)
214         mov     %r11d,4(%rdx)
215 ___
216 $code.=<<___;
217         mov     \$1,%eax
218 .Lno_key:
219         ret
220 .size   poly1305_init,.-poly1305_init
221
222 .type   poly1305_blocks,\@function,4
223 .align  32
224 poly1305_blocks:
225 .cfi_startproc
226 .Lblocks:
227         shr     \$4,$len
228         jz      .Lno_data               # too short
229
230         push    %rbx
231 .cfi_push       %rbx
232         push    %rbp
233 .cfi_push       %rbp
234         push    %r12
235 .cfi_push       %r12
236         push    %r13
237 .cfi_push       %r13
238         push    %r14
239 .cfi_push       %r14
240         push    %r15
241 .cfi_push       %r15
242 .Lblocks_body:
243
244         mov     $len,%r15               # reassign $len
245
246         mov     24($ctx),$r0            # load r
247         mov     32($ctx),$s1
248
249         mov     0($ctx),$h0             # load hash value
250         mov     8($ctx),$h1
251         mov     16($ctx),$h2
252
253         mov     $s1,$r1
254         shr     \$2,$s1
255         mov     $r1,%rax
256         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
257         jmp     .Loop
258
259 .align  32
260 .Loop:
261         add     0($inp),$h0             # accumulate input
262         adc     8($inp),$h1
263         lea     16($inp),$inp
264         adc     $padbit,$h2
265 ___
266         &poly1305_iteration();
267 $code.=<<___;
268         mov     $r1,%rax
269         dec     %r15                    # len-=16
270         jnz     .Loop
271
272         mov     $h0,0($ctx)             # store hash value
273         mov     $h1,8($ctx)
274         mov     $h2,16($ctx)
275
276         mov     0(%rsp),%r15
277 .cfi_restore    %r15
278         mov     8(%rsp),%r14
279 .cfi_restore    %r14
280         mov     16(%rsp),%r13
281 .cfi_restore    %r13
282         mov     24(%rsp),%r12
283 .cfi_restore    %r12
284         mov     32(%rsp),%rbp
285 .cfi_restore    %rbp
286         mov     40(%rsp),%rbx
287 .cfi_restore    %rbx
288         lea     48(%rsp),%rsp
289 .cfi_adjust_cfa_offset  -48
290 .Lno_data:
291 .Lblocks_epilogue:
292         ret
293 .cfi_endproc
294 .size   poly1305_blocks,.-poly1305_blocks
295
296 .type   poly1305_emit,\@function,3
297 .align  32
298 poly1305_emit:
299 .Lemit:
300         mov     0($ctx),%r8     # load hash value
301         mov     8($ctx),%r9
302         mov     16($ctx),%r10
303
304         mov     %r8,%rax
305         add     \$5,%r8         # compare to modulus
306         mov     %r9,%rcx
307         adc     \$0,%r9
308         adc     \$0,%r10
309         shr     \$2,%r10        # did 130-bit value overflow?
310         cmovnz  %r8,%rax
311         cmovnz  %r9,%rcx
312
313         add     0($nonce),%rax  # accumulate nonce
314         adc     8($nonce),%rcx
315         mov     %rax,0($mac)    # write result
316         mov     %rcx,8($mac)
317
318         ret
319 .size   poly1305_emit,.-poly1305_emit
320 ___
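# A tag sketch (illustrative only, never called): poly1305_emit above does
# the final reduction modulo 2^130-5 - the "add 5, test bit 130" trick - and
# then adds the 128-bit nonce modulo 2^128. In Math::BigInt terms:
sub poly1305_emit_ref {
	use Math::BigInt;
	my ($h, $nonce) = @_;			# partially reduced hash, 128-bit nonce
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	my $tag = $h->copy()->bmod($p)->badd($nonce);
	return $tag->bmod(Math::BigInt->new(2)->bpow(128));
}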
321 if ($avx) {
322
323 ########################################################################
324 # The layout of the opaque area is as follows.
325 #
326 #       unsigned __int32 h[5];          # current hash value base 2^26
327 #       unsigned __int32 is_base2_26;
328 #       unsigned __int64 r[2];          # key value base 2^64
329 #       unsigned __int64 pad;
330 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
331 #
332 # where r^n are the base 2^26 digits of the powers of the multiplier key.
333 # There are 5 digits, but the last four are interleaved with their multiples
334 # of 5, for 9 elements in total: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
335
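# A digit sketch (illustrative only): how a value is split into the five
# base 2^26 digits that __poly1305_init_avx below stores (the 5*digit
# companions exist because 2^130 = 5 mod p, so high partial products fold
# back in as multiples of 5).
sub base2_26_digits_ref {
	use Math::BigInt;
	my ($v) = @_;				# Math::BigInt, v < 2^130
	$v = $v->copy();			# don't clobber the caller's value
	my @d;
	for (0..4) {
		push @d, $v->copy()->band(0x3ffffff)->numify();
		$v->brsft(26);
	}
	return @d;				# v == sum d[i]*2^(26*i)
}
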
336 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
337     map("%xmm$_",(0..15));
338
339 $code.=<<___;
340 .type   __poly1305_block,\@abi-omnipotent
341 .align  32
342 __poly1305_block:
343 ___
344         &poly1305_iteration();
345 $code.=<<___;
346         ret
347 .size   __poly1305_block,.-__poly1305_block
348
349 .type   __poly1305_init_avx,\@abi-omnipotent
350 .align  32
351 __poly1305_init_avx:
352         mov     $r0,$h0
353         mov     $r1,$h1
354         xor     $h2,$h2
355
356         lea     48+64($ctx),$ctx        # size optimization
357
358         mov     $r1,%rax
359         call    __poly1305_block        # r^2
360
361         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
362         mov     \$0x3ffffff,%edx
363         mov     $h0,$d1
364         and     $h0#d,%eax
365         mov     $r0,$d2
366         and     $r0#d,%edx
367         mov     %eax,`16*0+0-64`($ctx)
368         shr     \$26,$d1
369         mov     %edx,`16*0+4-64`($ctx)
370         shr     \$26,$d2
371
372         mov     \$0x3ffffff,%eax
373         mov     \$0x3ffffff,%edx
374         and     $d1#d,%eax
375         and     $d2#d,%edx
376         mov     %eax,`16*1+0-64`($ctx)
377         lea     (%rax,%rax,4),%eax      # *5
378         mov     %edx,`16*1+4-64`($ctx)
379         lea     (%rdx,%rdx,4),%edx      # *5
380         mov     %eax,`16*2+0-64`($ctx)
381         shr     \$26,$d1
382         mov     %edx,`16*2+4-64`($ctx)
383         shr     \$26,$d2
384
385         mov     $h1,%rax
386         mov     $r1,%rdx
387         shl     \$12,%rax
388         shl     \$12,%rdx
389         or      $d1,%rax
390         or      $d2,%rdx
391         and     \$0x3ffffff,%eax
392         and     \$0x3ffffff,%edx
393         mov     %eax,`16*3+0-64`($ctx)
394         lea     (%rax,%rax,4),%eax      # *5
395         mov     %edx,`16*3+4-64`($ctx)
396         lea     (%rdx,%rdx,4),%edx      # *5
397         mov     %eax,`16*4+0-64`($ctx)
398         mov     $h1,$d1
399         mov     %edx,`16*4+4-64`($ctx)
400         mov     $r1,$d2
401
402         mov     \$0x3ffffff,%eax
403         mov     \$0x3ffffff,%edx
404         shr     \$14,$d1
405         shr     \$14,$d2
406         and     $d1#d,%eax
407         and     $d2#d,%edx
408         mov     %eax,`16*5+0-64`($ctx)
409         lea     (%rax,%rax,4),%eax      # *5
410         mov     %edx,`16*5+4-64`($ctx)
411         lea     (%rdx,%rdx,4),%edx      # *5
412         mov     %eax,`16*6+0-64`($ctx)
413         shr     \$26,$d1
414         mov     %edx,`16*6+4-64`($ctx)
415         shr     \$26,$d2
416
417         mov     $h2,%rax
418         shl     \$24,%rax
419         or      %rax,$d1
420         mov     $d1#d,`16*7+0-64`($ctx)
421         lea     ($d1,$d1,4),$d1         # *5
422         mov     $d2#d,`16*7+4-64`($ctx)
423         lea     ($d2,$d2,4),$d2         # *5
424         mov     $d1#d,`16*8+0-64`($ctx)
425         mov     $d2#d,`16*8+4-64`($ctx)
426
427         mov     $r1,%rax
428         call    __poly1305_block        # r^3
429
430         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
431         mov     $h0,$d1
432         and     $h0#d,%eax
433         shr     \$26,$d1
434         mov     %eax,`16*0+12-64`($ctx)
435
436         mov     \$0x3ffffff,%edx
437         and     $d1#d,%edx
438         mov     %edx,`16*1+12-64`($ctx)
439         lea     (%rdx,%rdx,4),%edx      # *5
440         shr     \$26,$d1
441         mov     %edx,`16*2+12-64`($ctx)
442
443         mov     $h1,%rax
444         shl     \$12,%rax
445         or      $d1,%rax
446         and     \$0x3ffffff,%eax
447         mov     %eax,`16*3+12-64`($ctx)
448         lea     (%rax,%rax,4),%eax      # *5
449         mov     $h1,$d1
450         mov     %eax,`16*4+12-64`($ctx)
451
452         mov     \$0x3ffffff,%edx
453         shr     \$14,$d1
454         and     $d1#d,%edx
455         mov     %edx,`16*5+12-64`($ctx)
456         lea     (%rdx,%rdx,4),%edx      # *5
457         shr     \$26,$d1
458         mov     %edx,`16*6+12-64`($ctx)
459
460         mov     $h2,%rax
461         shl     \$24,%rax
462         or      %rax,$d1
463         mov     $d1#d,`16*7+12-64`($ctx)
464         lea     ($d1,$d1,4),$d1         # *5
465         mov     $d1#d,`16*8+12-64`($ctx)
466
467         mov     $r1,%rax
468         call    __poly1305_block        # r^4
469
470         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
471         mov     $h0,$d1
472         and     $h0#d,%eax
473         shr     \$26,$d1
474         mov     %eax,`16*0+8-64`($ctx)
475
476         mov     \$0x3ffffff,%edx
477         and     $d1#d,%edx
478         mov     %edx,`16*1+8-64`($ctx)
479         lea     (%rdx,%rdx,4),%edx      # *5
480         shr     \$26,$d1
481         mov     %edx,`16*2+8-64`($ctx)
482
483         mov     $h1,%rax
484         shl     \$12,%rax
485         or      $d1,%rax
486         and     \$0x3ffffff,%eax
487         mov     %eax,`16*3+8-64`($ctx)
488         lea     (%rax,%rax,4),%eax      # *5
489         mov     $h1,$d1
490         mov     %eax,`16*4+8-64`($ctx)
491
492         mov     \$0x3ffffff,%edx
493         shr     \$14,$d1
494         and     $d1#d,%edx
495         mov     %edx,`16*5+8-64`($ctx)
496         lea     (%rdx,%rdx,4),%edx      # *5
497         shr     \$26,$d1
498         mov     %edx,`16*6+8-64`($ctx)
499
500         mov     $h2,%rax
501         shl     \$24,%rax
502         or      %rax,$d1
503         mov     $d1#d,`16*7+8-64`($ctx)
504         lea     ($d1,$d1,4),$d1         # *5
505         mov     $d1#d,`16*8+8-64`($ctx)
506
507         lea     -48-64($ctx),$ctx       # size [de-]optimization
508         ret
509 .size   __poly1305_init_avx,.-__poly1305_init_avx
510
511 .type   poly1305_blocks_avx,\@function,4
512 .align  32
513 poly1305_blocks_avx:
514 .cfi_startproc
515         mov     20($ctx),%r8d           # is_base2_26
516         cmp     \$128,$len
517         jae     .Lblocks_avx
518         test    %r8d,%r8d
519         jz      .Lblocks
520
521 .Lblocks_avx:
522         and     \$-16,$len
523         jz      .Lno_data_avx
524
525         vzeroupper
526
527         test    %r8d,%r8d
528         jz      .Lbase2_64_avx
529
530         test    \$31,$len
531         jz      .Leven_avx
532
533         push    %rbx
534 .cfi_push       %rbx
535         push    %rbp
536 .cfi_push       %rbp
537         push    %r12
538 .cfi_push       %r12
539         push    %r13
540 .cfi_push       %r13
541         push    %r14
542 .cfi_push       %r14
543         push    %r15
544 .cfi_push       %r15
545 .Lblocks_avx_body:
546
547         mov     $len,%r15               # reassign $len
548
549         mov     0($ctx),$d1             # load hash value
550         mov     8($ctx),$d2
551         mov     16($ctx),$h2#d
552
553         mov     24($ctx),$r0            # load r
554         mov     32($ctx),$s1
555
556         ################################# base 2^26 -> base 2^64
557         mov     $d1#d,$h0#d
558         and     \$`-1*(1<<31)`,$d1
559         mov     $d2,$r1                 # borrow $r1
560         mov     $d2#d,$h1#d
561         and     \$`-1*(1<<31)`,$d2
562
563         shr     \$6,$d1
564         shl     \$52,$r1
565         add     $d1,$h0
566         shr     \$12,$h1
567         shr     \$18,$d2
568         add     $r1,$h0
569         adc     $d2,$h1
570
571         mov     $h2,$d1
572         shl     \$40,$d1
573         shr     \$24,$h2
574         add     $d1,$h1
575         adc     \$0,$h2                 # can be partially reduced...
576
577         mov     \$-4,$d2                # ... so reduce
578         mov     $h2,$d1
579         and     $h2,$d2
580         shr     \$2,$d1
581         and     \$3,$h2
582         add     $d2,$d1                 # =*5
583         add     $d1,$h0
584         adc     \$0,$h1
585         adc     \$0,$h2
586
587         mov     $s1,$r1
588         mov     $s1,%rax
589         shr     \$2,$s1
590         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
591
592         add     0($inp),$h0             # accumulate input
593         adc     8($inp),$h1
594         lea     16($inp),$inp
595         adc     $padbit,$h2
596
597         call    __poly1305_block
598
599         test    $padbit,$padbit         # if $padbit is zero,
600         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
601
602         ################################# base 2^64 -> base 2^26
603         mov     $h0,%rax
604         mov     $h0,%rdx
605         shr     \$52,$h0
606         mov     $h1,$r0
607         mov     $h1,$r1
608         shr     \$26,%rdx
609         and     \$0x3ffffff,%rax        # h[0]
610         shl     \$12,$r0
611         and     \$0x3ffffff,%rdx        # h[1]
612         shr     \$14,$h1
613         or      $r0,$h0
614         shl     \$24,$h2
615         and     \$0x3ffffff,$h0         # h[2]
616         shr     \$40,$r1
617         and     \$0x3ffffff,$h1         # h[3]
618         or      $r1,$h2                 # h[4]
619
620         sub     \$16,%r15
621         jz      .Lstore_base2_26_avx
622
623         vmovd   %rax#d,$H0
624         vmovd   %rdx#d,$H1
625         vmovd   $h0#d,$H2
626         vmovd   $h1#d,$H3
627         vmovd   $h2#d,$H4
628         jmp     .Lproceed_avx
629
630 .align  32
631 .Lstore_base2_64_avx:
632         mov     $h0,0($ctx)
633         mov     $h1,8($ctx)
634         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
635         jmp     .Ldone_avx
636
637 .align  16
638 .Lstore_base2_26_avx:
639         mov     %rax#d,0($ctx)          # store hash value base 2^26
640         mov     %rdx#d,4($ctx)
641         mov     $h0#d,8($ctx)
642         mov     $h1#d,12($ctx)
643         mov     $h2#d,16($ctx)
644 .align  16
645 .Ldone_avx:
646         mov     0(%rsp),%r15
647 .cfi_restore    %r15
648         mov     8(%rsp),%r14
649 .cfi_restore    %r14
650         mov     16(%rsp),%r13
651 .cfi_restore    %r13
652         mov     24(%rsp),%r12
653 .cfi_restore    %r12
654         mov     32(%rsp),%rbp
655 .cfi_restore    %rbp
656         mov     40(%rsp),%rbx
657 .cfi_restore    %rbx
658         lea     48(%rsp),%rsp
659 .cfi_adjust_cfa_offset  -48
660 .Lno_data_avx:
661 .Lblocks_avx_epilogue:
662         ret
663 .cfi_endproc
664
665 .align  32
666 .Lbase2_64_avx:
667 .cfi_startproc
668         push    %rbx
669 .cfi_push       %rbx
670         push    %rbp
671 .cfi_push       %rbp
672         push    %r12
673 .cfi_push       %r12
674         push    %r13
675 .cfi_push       %r13
676         push    %r14
677 .cfi_push       %r14
678         push    %r15
679 .cfi_push       %r15
680 .Lbase2_64_avx_body:
681
682         mov     $len,%r15               # reassign $len
683
684         mov     24($ctx),$r0            # load r
685         mov     32($ctx),$s1
686
687         mov     0($ctx),$h0             # load hash value
688         mov     8($ctx),$h1
689         mov     16($ctx),$h2#d
690
691         mov     $s1,$r1
692         mov     $s1,%rax
693         shr     \$2,$s1
694         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
695
696         test    \$31,$len
697         jz      .Linit_avx
698
699         add     0($inp),$h0             # accumulate input
700         adc     8($inp),$h1
701         lea     16($inp),$inp
702         adc     $padbit,$h2
703         sub     \$16,%r15
704
705         call    __poly1305_block
706
707 .Linit_avx:
708         ################################# base 2^64 -> base 2^26
709         mov     $h0,%rax
710         mov     $h0,%rdx
711         shr     \$52,$h0
712         mov     $h1,$d1
713         mov     $h1,$d2
714         shr     \$26,%rdx
715         and     \$0x3ffffff,%rax        # h[0]
716         shl     \$12,$d1
717         and     \$0x3ffffff,%rdx        # h[1]
718         shr     \$14,$h1
719         or      $d1,$h0
720         shl     \$24,$h2
721         and     \$0x3ffffff,$h0         # h[2]
722         shr     \$40,$d2
723         and     \$0x3ffffff,$h1         # h[3]
724         or      $d2,$h2                 # h[4]
725
726         vmovd   %rax#d,$H0
727         vmovd   %rdx#d,$H1
728         vmovd   $h0#d,$H2
729         vmovd   $h1#d,$H3
730         vmovd   $h2#d,$H4
731         movl    \$1,20($ctx)            # set is_base2_26
732
733         call    __poly1305_init_avx
734
735 .Lproceed_avx:
736         mov     %r15,$len
737
738         mov     0(%rsp),%r15
739 .cfi_restore    %r15
740         mov     8(%rsp),%r14
741 .cfi_restore    %r14
742         mov     16(%rsp),%r13
743 .cfi_restore    %r13
744         mov     24(%rsp),%r12
745 .cfi_restore    %r12
746         mov     32(%rsp),%rbp
747 .cfi_restore    %rbp
748         mov     40(%rsp),%rbx
749 .cfi_restore    %rbx
750         lea     48(%rsp),%rax
751         lea     48(%rsp),%rsp
752 .cfi_adjust_cfa_offset  -48
753 .Lbase2_64_avx_epilogue:
754         jmp     .Ldo_avx
755 .cfi_endproc
756
757 .align  32
758 .Leven_avx:
759 .cfi_startproc
760         vmovd           4*0($ctx),$H0           # load hash value
761         vmovd           4*1($ctx),$H1
762         vmovd           4*2($ctx),$H2
763         vmovd           4*3($ctx),$H3
764         vmovd           4*4($ctx),$H4
765
766 .Ldo_avx:
767 ___
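# A power-table sketch (illustrative only): the table filled in by
# __poly1305_init_avx above holds the base 2^26 digits of r^1..r^4, stored
# interleaved as r^2,r^1,r^4,r^3 together with the 5*digit companions.
# The powers themselves are simply:
sub poly1305_key_powers_ref {
	use Math::BigInt;
	my ($r) = @_;				# clamped key as Math::BigInt
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	my @pow = ($r->copy());
	push @pow, $pow[-1]->copy()->bmul($r)->bmod($p) for (1..3);
	return @pow;				# (r, r^2, r^3, r^4)
}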
768 $code.=<<___    if (!$win64);
769         lea             -0x58(%rsp),%r11
770 .cfi_def_cfa            %r11,0x60
771         sub             \$0x178,%rsp
772 ___
773 $code.=<<___    if ($win64);
774         lea             -0xf8(%rsp),%r11
775         sub             \$0x218,%rsp
776         vmovdqa         %xmm6,0x50(%r11)
777         vmovdqa         %xmm7,0x60(%r11)
778         vmovdqa         %xmm8,0x70(%r11)
779         vmovdqa         %xmm9,0x80(%r11)
780         vmovdqa         %xmm10,0x90(%r11)
781         vmovdqa         %xmm11,0xa0(%r11)
782         vmovdqa         %xmm12,0xb0(%r11)
783         vmovdqa         %xmm13,0xc0(%r11)
784         vmovdqa         %xmm14,0xd0(%r11)
785         vmovdqa         %xmm15,0xe0(%r11)
786 .Ldo_avx_body:
787 ___
788 $code.=<<___;
789         sub             \$64,$len
790         lea             -32($inp),%rax
791         cmovc           %rax,$inp
792
793         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
794         lea             `16*3+64`($ctx),$ctx    # size optimization
795         lea             .Lconst(%rip),%rcx
796
797         ################################################################
798         # load input
799         vmovdqu         16*2($inp),$T0
800         vmovdqu         16*3($inp),$T1
801         vmovdqa         64(%rcx),$MASK          # .Lmask26
802
803         vpsrldq         \$6,$T0,$T2             # splat input
804         vpsrldq         \$6,$T1,$T3
805         vpunpckhqdq     $T1,$T0,$T4             # 4
806         vpunpcklqdq     $T1,$T0,$T0             # 0:1
807         vpunpcklqdq     $T3,$T2,$T3             # 2:3
808
809         vpsrlq          \$40,$T4,$T4            # 4
810         vpsrlq          \$26,$T0,$T1
811         vpand           $MASK,$T0,$T0           # 0
812         vpsrlq          \$4,$T3,$T2
813         vpand           $MASK,$T1,$T1           # 1
814         vpsrlq          \$30,$T3,$T3
815         vpand           $MASK,$T2,$T2           # 2
816         vpand           $MASK,$T3,$T3           # 3
817         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
818
819         jbe             .Lskip_loop_avx
820
821         # expand and copy pre-calculated table to stack
822         vmovdqu         `16*1-64`($ctx),$D1
823         vmovdqu         `16*2-64`($ctx),$D2
824         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
825         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
826         vmovdqa         $D3,-0x90(%r11)
827         vmovdqa         $D0,0x00(%rsp)
828         vpshufd         \$0xEE,$D1,$D4
829         vmovdqu         `16*3-64`($ctx),$D0
830         vpshufd         \$0x44,$D1,$D1
831         vmovdqa         $D4,-0x80(%r11)
832         vmovdqa         $D1,0x10(%rsp)
833         vpshufd         \$0xEE,$D2,$D3
834         vmovdqu         `16*4-64`($ctx),$D1
835         vpshufd         \$0x44,$D2,$D2
836         vmovdqa         $D3,-0x70(%r11)
837         vmovdqa         $D2,0x20(%rsp)
838         vpshufd         \$0xEE,$D0,$D4
839         vmovdqu         `16*5-64`($ctx),$D2
840         vpshufd         \$0x44,$D0,$D0
841         vmovdqa         $D4,-0x60(%r11)
842         vmovdqa         $D0,0x30(%rsp)
843         vpshufd         \$0xEE,$D1,$D3
844         vmovdqu         `16*6-64`($ctx),$D0
845         vpshufd         \$0x44,$D1,$D1
846         vmovdqa         $D3,-0x50(%r11)
847         vmovdqa         $D1,0x40(%rsp)
848         vpshufd         \$0xEE,$D2,$D4
849         vmovdqu         `16*7-64`($ctx),$D1
850         vpshufd         \$0x44,$D2,$D2
851         vmovdqa         $D4,-0x40(%r11)
852         vmovdqa         $D2,0x50(%rsp)
853         vpshufd         \$0xEE,$D0,$D3
854         vmovdqu         `16*8-64`($ctx),$D2
855         vpshufd         \$0x44,$D0,$D0
856         vmovdqa         $D3,-0x30(%r11)
857         vmovdqa         $D0,0x60(%rsp)
858         vpshufd         \$0xEE,$D1,$D4
859         vpshufd         \$0x44,$D1,$D1
860         vmovdqa         $D4,-0x20(%r11)
861         vmovdqa         $D1,0x70(%rsp)
862         vpshufd         \$0xEE,$D2,$D3
863          vmovdqa        0x00(%rsp),$D4          # preload r0^2
864         vpshufd         \$0x44,$D2,$D2
865         vmovdqa         $D3,-0x10(%r11)
866         vmovdqa         $D2,0x80(%rsp)
867
868         jmp             .Loop_avx
869
870 .align  32
871 .Loop_avx:
872         ################################################################
873         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
874         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
875         #   \___________________/
876         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
877         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
878         #   \___________________/ \____________________/
879         #
880         # Note that we start with inp[2:3]*r^2. This is because it
881         # doesn't depend on the reduction in the previous iteration.
882         ################################################################
883         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
884         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
885         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
886         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
887         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
888         #
889         # though note that $Tx and $Hx are "reversed" in this section,
890         # and $D4 is preloaded with r0^2...
891
892         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
893         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
894           vmovdqa       $H2,0x20(%r11)                          # offload hash
895         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
896          vmovdqa        0x10(%rsp),$H2          # r1^2
897         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
898         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
899
900           vmovdqa       $H0,0x00(%r11)                          #
901         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
902           vmovdqa       $H1,0x10(%r11)                          #
903         vpmuludq        $T3,$H2,$H1             # h3*r1
904         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
905         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
906           vmovdqa       $H3,0x30(%r11)                          #
907         vpmuludq        $T2,$H2,$H0             # h2*r1
908         vpmuludq        $T1,$H2,$H1             # h1*r1
909         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
910          vmovdqa        0x30(%rsp),$H3          # r2^2
911         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
912           vmovdqa       $H4,0x40(%r11)                          #
913         vpmuludq        $T0,$H2,$H2             # h0*r1
914          vpmuludq       $T2,$H3,$H0             # h2*r2
915         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
916
917          vmovdqa        0x40(%rsp),$H4          # s2^2
918         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
919         vpmuludq        $T1,$H3,$H1             # h1*r2
920         vpmuludq        $T0,$H3,$H3             # h0*r2
921         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
922          vmovdqa        0x50(%rsp),$H2          # r3^2
923         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
924         vpmuludq        $T4,$H4,$H0             # h4*s2
925         vpmuludq        $T3,$H4,$H4             # h3*s2
926         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
927          vmovdqa        0x60(%rsp),$H3          # s3^2
928         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
929
930          vmovdqa        0x80(%rsp),$H4          # s4^2
931         vpmuludq        $T1,$H2,$H1             # h1*r3
932         vpmuludq        $T0,$H2,$H2             # h0*r3
933         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
934         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
935         vpmuludq        $T4,$H3,$H0             # h4*s3
936         vpmuludq        $T3,$H3,$H1             # h3*s3
937         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
938          vmovdqu        16*0($inp),$H0                          # load input
939         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
940         vpmuludq        $T2,$H3,$H3             # h2*s3
941          vpmuludq       $T2,$H4,$T2             # h2*s4
942         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
943
944          vmovdqu        16*1($inp),$H1                          #
945         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
946         vpmuludq        $T3,$H4,$T3             # h3*s4
947         vpmuludq        $T4,$H4,$T4             # h4*s4
948          vpsrldq        \$6,$H0,$H2                             # splat input
949         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
950         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
951          vpsrldq        \$6,$H1,$H3                             #
952         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
953         vpmuludq        $T1,$H4,$T0             # h1*s4
954          vpunpckhqdq    $H1,$H0,$H4             # 4
955         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
956          vmovdqa        -0x90(%r11),$T4         # r0^4
957         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
958
959         vpunpcklqdq     $H1,$H0,$H0             # 0:1
960         vpunpcklqdq     $H3,$H2,$H3             # 2:3
961
962         #vpsrlq         \$40,$H4,$H4            # 4
963         vpsrldq         \$`40/8`,$H4,$H4        # 4
964         vpsrlq          \$26,$H0,$H1
965         vpand           $MASK,$H0,$H0           # 0
966         vpsrlq          \$4,$H3,$H2
967         vpand           $MASK,$H1,$H1           # 1
968         vpand           0(%rcx),$H4,$H4         # .Lmask24
969         vpsrlq          \$30,$H3,$H3
970         vpand           $MASK,$H2,$H2           # 2
971         vpand           $MASK,$H3,$H3           # 3
972         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
973
974         vpaddq          0x00(%r11),$H0,$H0      # add hash value
975         vpaddq          0x10(%r11),$H1,$H1
976         vpaddq          0x20(%r11),$H2,$H2
977         vpaddq          0x30(%r11),$H3,$H3
978         vpaddq          0x40(%r11),$H4,$H4
979
980         lea             16*2($inp),%rax
981         lea             16*4($inp),$inp
982         sub             \$64,$len
983         cmovc           %rax,$inp
984
985         ################################################################
986         # Now we accumulate (inp[0:1]+hash)*r^4
987         ################################################################
988         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
989         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
990         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
991         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
992         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
993
994         vpmuludq        $H0,$T4,$T0             # h0*r0
995         vpmuludq        $H1,$T4,$T1             # h1*r0
996         vpaddq          $T0,$D0,$D0
997         vpaddq          $T1,$D1,$D1
998          vmovdqa        -0x80(%r11),$T2         # r1^4
999         vpmuludq        $H2,$T4,$T0             # h2*r0
1000         vpmuludq        $H3,$T4,$T1             # h3*r0
1001         vpaddq          $T0,$D2,$D2
1002         vpaddq          $T1,$D3,$D3
1003         vpmuludq        $H4,$T4,$T4             # h4*r0
1004          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
1005         vpaddq          $T4,$D4,$D4
1006
1007         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
1008         vpmuludq        $H2,$T2,$T1             # h2*r1
1009         vpmuludq        $H3,$T2,$T0             # h3*r1
1010         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1011          vmovdqa        -0x60(%r11),$T3         # r2^4
1012         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1013         vpmuludq        $H1,$T2,$T1             # h1*r1
1014         vpmuludq        $H0,$T2,$T2             # h0*r1
1015         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
1016         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1017
1018          vmovdqa        -0x50(%r11),$T4         # s2^4
1019         vpmuludq        $H2,$T3,$T0             # h2*r2
1020         vpmuludq        $H1,$T3,$T1             # h1*r2
1021         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
1022         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
1023          vmovdqa        -0x40(%r11),$T2         # r3^4
1024         vpmuludq        $H0,$T3,$T3             # h0*r2
1025         vpmuludq        $H4,$T4,$T0             # h4*s2
1026         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
1027         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
1028          vmovdqa        -0x30(%r11),$T3         # s3^4
1029         vpmuludq        $H3,$T4,$T4             # h3*s2
1030          vpmuludq       $H1,$T2,$T1             # h1*r3
1031         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1032
1033          vmovdqa        -0x10(%r11),$T4         # s4^4
1034         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
1035         vpmuludq        $H0,$T2,$T2             # h0*r3
1036         vpmuludq        $H4,$T3,$T0             # h4*s3
1037         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
1038         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
1039          vmovdqu        16*2($inp),$T0                          # load input
1040         vpmuludq        $H3,$T3,$T2             # h3*s3
1041         vpmuludq        $H2,$T3,$T3             # h2*s3
1042         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
1043          vmovdqu        16*3($inp),$T1                          #
1044         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
1045
1046         vpmuludq        $H2,$T4,$H2             # h2*s4
1047         vpmuludq        $H3,$T4,$H3             # h3*s4
1048          vpsrldq        \$6,$T0,$T2                             # splat input
1049         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
1050         vpmuludq        $H4,$T4,$H4             # h4*s4
1051          vpsrldq        \$6,$T1,$T3                             #
1052         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1053         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1054         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
1055         vpmuludq        $H1,$T4,$H0             # h1*s4
1056          vpunpckhqdq    $T1,$T0,$T4             # 4
1057         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1058         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1059
1060         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1061         vpunpcklqdq     $T3,$T2,$T3             # 2:3
1062
1063         #vpsrlq         \$40,$T4,$T4            # 4
1064         vpsrldq         \$`40/8`,$T4,$T4        # 4
1065         vpsrlq          \$26,$T0,$T1
1066          vmovdqa        0x00(%rsp),$D4          # preload r0^2
1067         vpand           $MASK,$T0,$T0           # 0
1068         vpsrlq          \$4,$T3,$T2
1069         vpand           $MASK,$T1,$T1           # 1
1070         vpand           0(%rcx),$T4,$T4         # .Lmask24
1071         vpsrlq          \$30,$T3,$T3
1072         vpand           $MASK,$T2,$T2           # 2
1073         vpand           $MASK,$T3,$T3           # 3
1074         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1075
1076         ################################################################
1077         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
1078         # and P. Schwabe
1079
1080         vpsrlq          \$26,$H3,$D3
1081         vpand           $MASK,$H3,$H3
1082         vpaddq          $D3,$H4,$H4             # h3 -> h4
1083
1084         vpsrlq          \$26,$H0,$D0
1085         vpand           $MASK,$H0,$H0
1086         vpaddq          $D0,$D1,$H1             # h0 -> h1
1087
1088         vpsrlq          \$26,$H4,$D0
1089         vpand           $MASK,$H4,$H4
1090
1091         vpsrlq          \$26,$H1,$D1
1092         vpand           $MASK,$H1,$H1
1093         vpaddq          $D1,$H2,$H2             # h1 -> h2
1094
1095         vpaddq          $D0,$H0,$H0
1096         vpsllq          \$2,$D0,$D0
1097         vpaddq          $D0,$H0,$H0             # h4 -> h0
1098
1099         vpsrlq          \$26,$H2,$D2
1100         vpand           $MASK,$H2,$H2
1101         vpaddq          $D2,$H3,$H3             # h2 -> h3
1102
1103         vpsrlq          \$26,$H0,$D0
1104         vpand           $MASK,$H0,$H0
1105         vpaddq          $D0,$H1,$H1             # h0 -> h1
1106
1107         vpsrlq          \$26,$H3,$D3
1108         vpand           $MASK,$H3,$H3
1109         vpaddq          $D3,$H4,$H4             # h3 -> h4
1110
1111         ja              .Loop_avx
1112
1113 .Lskip_loop_avx:
1114         ################################################################
1115         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1116
1117         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1118         add             \$32,$len
1119         jnz             .Long_tail_avx
1120
1121         vpaddq          $H2,$T2,$T2
1122         vpaddq          $H0,$T0,$T0
1123         vpaddq          $H1,$T1,$T1
1124         vpaddq          $H3,$T3,$T3
1125         vpaddq          $H4,$T4,$T4
1126
1127 .Long_tail_avx:
1128         vmovdqa         $H2,0x20(%r11)
1129         vmovdqa         $H0,0x00(%r11)
1130         vmovdqa         $H1,0x10(%r11)
1131         vmovdqa         $H3,0x30(%r11)
1132         vmovdqa         $H4,0x40(%r11)
1133
1134         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1135         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1136         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1137         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1138         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1139
1140         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1141         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1142          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1143         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1144         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1145         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1146
1147         vpmuludq        $T3,$H2,$H0             # h3*r1
1148         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1149          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1150         vpmuludq        $T2,$H2,$H1             # h2*r1
1151         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1152          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1153         vpmuludq        $T1,$H2,$H0             # h1*r1
1154         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1155         vpmuludq        $T0,$H2,$H2             # h0*r1
1156         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1157         vpmuludq        $T4,$H3,$H3             # h4*s1
1158         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1159
1160          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1161         vpmuludq        $T2,$H4,$H1             # h2*r2
1162         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1163         vpmuludq        $T1,$H4,$H0             # h1*r2
1164         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1165          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1166         vpmuludq        $T0,$H4,$H4             # h0*r2
1167         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1168         vpmuludq        $T4,$H2,$H1             # h4*s2
1169         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1170          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1171         vpmuludq        $T3,$H2,$H2             # h3*s2
1172         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1173
1174         vpmuludq        $T1,$H3,$H0             # h1*r3
1175         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1176         vpmuludq        $T0,$H3,$H3             # h0*r3
1177         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1178          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1179         vpmuludq        $T4,$H4,$H1             # h4*s3
1180         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1181          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1182         vpmuludq        $T3,$H4,$H0             # h3*s3
1183         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1184         vpmuludq        $T2,$H4,$H4             # h2*s3
1185         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1186
1187         vpmuludq        $T0,$H2,$H2             # h0*r4
1188         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1189         vpmuludq        $T4,$H3,$H1             # h4*s4
1190         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1191         vpmuludq        $T3,$H3,$H0             # h3*s4
1192         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1193         vpmuludq        $T2,$H3,$H1             # h2*s4
1194         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1195         vpmuludq        $T1,$H3,$H3             # h1*s4
1196         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1197
1198         jz              .Lshort_tail_avx
1199
1200         vmovdqu         16*0($inp),$H0          # load input
1201         vmovdqu         16*1($inp),$H1
1202
1203         vpsrldq         \$6,$H0,$H2             # splat input
1204         vpsrldq         \$6,$H1,$H3
1205         vpunpckhqdq     $H1,$H0,$H4             # 4
1206         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1207         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1208
1209         vpsrlq          \$40,$H4,$H4            # 4
1210         vpsrlq          \$26,$H0,$H1
1211         vpand           $MASK,$H0,$H0           # 0
1212         vpsrlq          \$4,$H3,$H2
1213         vpand           $MASK,$H1,$H1           # 1
1214         vpsrlq          \$30,$H3,$H3
1215         vpand           $MASK,$H2,$H2           # 2
1216         vpand           $MASK,$H3,$H3           # 3
1217         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1218
1219         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1220         vpaddq          0x00(%r11),$H0,$H0
1221         vpaddq          0x10(%r11),$H1,$H1
1222         vpaddq          0x20(%r11),$H2,$H2
1223         vpaddq          0x30(%r11),$H3,$H3
1224         vpaddq          0x40(%r11),$H4,$H4
1225
1226         ################################################################
1227         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1228
1229         vpmuludq        $H0,$T4,$T0             # h0*r0
1230         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1231         vpmuludq        $H1,$T4,$T1             # h1*r0
1232         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1233         vpmuludq        $H2,$T4,$T0             # h2*r0
1234         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1235          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1236         vpmuludq        $H3,$T4,$T1             # h3*r0
1237         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1238         vpmuludq        $H4,$T4,$T4             # h4*r0
1239         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1240
1241         vpmuludq        $H3,$T2,$T0             # h3*r1
1242         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1243          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1244         vpmuludq        $H2,$T2,$T1             # h2*r1
1245         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1246          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1247         vpmuludq        $H1,$T2,$T0             # h1*r1
1248         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1249         vpmuludq        $H0,$T2,$T2             # h0*r1
1250         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1251         vpmuludq        $H4,$T3,$T3             # h4*s1
1252         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1253
1254          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1255         vpmuludq        $H2,$T4,$T1             # h2*r2
1256         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1257         vpmuludq        $H1,$T4,$T0             # h1*r2
1258         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1259          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1260         vpmuludq        $H0,$T4,$T4             # h0*r2
1261         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1262         vpmuludq        $H4,$T2,$T1             # h4*s2
1263         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1264          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1265         vpmuludq        $H3,$T2,$T2             # h3*s2
1266         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1267
1268         vpmuludq        $H1,$T3,$T0             # h1*r3
1269         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1270         vpmuludq        $H0,$T3,$T3             # h0*r3
1271         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1272          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1273         vpmuludq        $H4,$T4,$T1             # h4*s3
1274         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1275          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1276         vpmuludq        $H3,$T4,$T0             # h3*s3
1277         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1278         vpmuludq        $H2,$T4,$T4             # h2*s3
1279         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1280
1281         vpmuludq        $H0,$T2,$T2             # h0*r4
1282         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1283         vpmuludq        $H4,$T3,$T1             # h4*s4
1284         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1285         vpmuludq        $H3,$T3,$T0             # h3*s4
1286         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1287         vpmuludq        $H2,$T3,$T1             # h2*s4
1288         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1289         vpmuludq        $H1,$T3,$T3             # h1*s4
1290         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1291
1292 .Lshort_tail_avx:
1293         ################################################################
1294         # horizontal addition
1295
1296         vpsrldq         \$8,$D4,$T4
1297         vpsrldq         \$8,$D3,$T3
1298         vpsrldq         \$8,$D1,$T1
1299         vpsrldq         \$8,$D0,$T0
1300         vpsrldq         \$8,$D2,$T2
1301         vpaddq          $T3,$D3,$D3
1302         vpaddq          $T4,$D4,$D4
1303         vpaddq          $T0,$D0,$D0
1304         vpaddq          $T1,$D1,$D1
1305         vpaddq          $T2,$D2,$D2
1306
1307         ################################################################
1308         # lazy reduction
1309
1310         vpsrlq          \$26,$D3,$H3
1311         vpand           $MASK,$D3,$D3
1312         vpaddq          $H3,$D4,$D4             # h3 -> h4
1313
1314         vpsrlq          \$26,$D0,$H0
1315         vpand           $MASK,$D0,$D0
1316         vpaddq          $H0,$D1,$D1             # h0 -> h1
1317
1318         vpsrlq          \$26,$D4,$H4
1319         vpand           $MASK,$D4,$D4
1320
1321         vpsrlq          \$26,$D1,$H1
1322         vpand           $MASK,$D1,$D1
1323         vpaddq          $H1,$D2,$D2             # h1 -> h2
1324
1325         vpaddq          $H4,$D0,$D0
1326         vpsllq          \$2,$H4,$H4
1327         vpaddq          $H4,$D0,$D0             # h4 -> h0
1328
1329         vpsrlq          \$26,$D2,$H2
1330         vpand           $MASK,$D2,$D2
1331         vpaddq          $H2,$D3,$D3             # h2 -> h3
1332
1333         vpsrlq          \$26,$D0,$H0
1334         vpand           $MASK,$D0,$D0
1335         vpaddq          $H0,$D1,$D1             # h0 -> h1
1336
1337         vpsrlq          \$26,$D3,$H3
1338         vpand           $MASK,$D3,$D3
1339         vpaddq          $H3,$D4,$D4             # h3 -> h4
1340
1341         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1342         vmovd           $D1,`4*1-48-64`($ctx)
1343         vmovd           $D2,`4*2-48-64`($ctx)
1344         vmovd           $D3,`4*3-48-64`($ctx)
1345         vmovd           $D4,`4*4-48-64`($ctx)
1346 ___
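# A limb sketch (illustrative only; 64-bit perl assumed): the lazy reduction
# used in .Loop_avx and .Lshort_tail_avx above propagates the same carries,
# just in an interleaved order chosen for instruction-level parallelism; the
# carry out of the top limb folds back into the bottom one as *5 = *4 + *1
# (the vpaddq/vpsllq-by-2/vpaddq triplet). A sequential equivalent:
sub lazy_reduce_ref {
	my (@h) = @_;				# five limbs as plain integers
	my $m = 0x3ffffff;
	for my $i (0..3) {			# ripple carries upwards
		$h[$i+1] += $h[$i] >> 26;  $h[$i] &= $m;
	}
	my $c = $h[4] >> 26;  $h[4] &= $m;
	$h[0] += $c + ($c << 2);		# fold the carry back in as 5*c
	$h[1] += $h[0] >> 26;  $h[0] &= $m;	# one more ripple keeps limbs bounded
	return @h;				# same value mod 2^130-5, limbs ~26 bits
}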
1347 $code.=<<___    if ($win64);
1348         vmovdqa         0x50(%r11),%xmm6
1349         vmovdqa         0x60(%r11),%xmm7
1350         vmovdqa         0x70(%r11),%xmm8
1351         vmovdqa         0x80(%r11),%xmm9
1352         vmovdqa         0x90(%r11),%xmm10
1353         vmovdqa         0xa0(%r11),%xmm11
1354         vmovdqa         0xb0(%r11),%xmm12
1355         vmovdqa         0xc0(%r11),%xmm13
1356         vmovdqa         0xd0(%r11),%xmm14
1357         vmovdqa         0xe0(%r11),%xmm15
1358         lea             0xf8(%r11),%rsp
1359 .Ldo_avx_epilogue:
1360 ___
1361 $code.=<<___    if (!$win64);
1362         lea             0x58(%r11),%rsp
1363 .cfi_def_cfa            %rsp,8
1364 ___
1365 $code.=<<___;
1366         vzeroupper
1367         ret
1368 .cfi_endproc
1369 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1370
1371 .type   poly1305_emit_avx,\@function,3
1372 .align  32
1373 poly1305_emit_avx:
1374         cmpl    \$0,20($ctx)    # is_base2_26?
1375         je      .Lemit
1376
1377         mov     0($ctx),%eax    # load hash value base 2^26
1378         mov     4($ctx),%ecx
1379         mov     8($ctx),%r8d
1380         mov     12($ctx),%r11d
1381         mov     16($ctx),%r10d
1382
1383         shl     \$26,%rcx       # base 2^26 -> base 2^64
1384         mov     %r8,%r9
1385         shl     \$52,%r8
1386         add     %rcx,%rax
1387         shr     \$12,%r9
1388         add     %rax,%r8        # h0
1389         adc     \$0,%r9
1390
1391         shl     \$14,%r11
1392         mov     %r10,%rax
1393         shr     \$24,%r10
1394         add     %r11,%r9
1395         shl     \$40,%rax
1396         add     %rax,%r9        # h1
1397         adc     \$0,%r10        # h2
1398
1399         mov     %r10,%rax       # could be partially reduced, so reduce
1400         mov     %r10,%rcx
1401         and     \$3,%r10
1402         shr     \$2,%rax
1403         and     \$-4,%rcx
1404         add     %rcx,%rax
1405         add     %rax,%r8
1406         adc     \$0,%r9
1407         adc     \$0,%r10
1408
1409         mov     %r8,%rax
1410         add     \$5,%r8         # compare to modulus
1411         mov     %r9,%rcx
1412         adc     \$0,%r9
1413         adc     \$0,%r10
1414         shr     \$2,%r10        # did 130-bit value overflow?
1415         cmovnz  %r8,%rax
1416         cmovnz  %r9,%rcx
1417
1418         add     0($nonce),%rax  # accumulate nonce
1419         adc     8($nonce),%rcx
1420         mov     %rax,0($mac)    # write result
1421         mov     %rcx,8($mac)
1422
1423         ret
1424 .size   poly1305_emit_avx,.-poly1305_emit_avx
1425 ___
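# A radix sketch (illustrative only): the shift/or sequence at the top of
# poly1305_emit_avx above is just the base 2^26 -> base 2^64 fold
# h = sum d[i]*2^(26*i), spread over three 64-bit words.
sub base2_26_to_int_ref {
	use Math::BigInt;
	my (@d) = @_;				# five 26-bit limbs
	my $h = Math::BigInt->bzero();
	$h->badd(Math::BigInt->new($d[$_])->blsft(26*$_)) for (0..4);
	return $h;
}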
1426
1427 if ($avx>1) {
1428 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1429     map("%ymm$_",(0..15));
1430 my $S4=$MASK;
1431
1432 $code.=<<___;
1433 .type   poly1305_blocks_avx2,\@function,4
1434 .align  32
1435 poly1305_blocks_avx2:
1436 .cfi_startproc
1437         mov     20($ctx),%r8d           # is_base2_26
1438         cmp     \$128,$len
1439         jae     .Lblocks_avx2
1440         test    %r8d,%r8d
1441         jz      .Lblocks
1442
1443 .Lblocks_avx2:
1444         and     \$-16,$len
1445         jz      .Lno_data_avx2
1446
1447         vzeroupper
1448
1449         test    %r8d,%r8d
1450         jz      .Lbase2_64_avx2
1451
1452         test    \$63,$len
1453         jz      .Leven_avx2
1454
1455         push    %rbx
1456 .cfi_push       %rbx
1457         push    %rbp
1458 .cfi_push       %rbp
1459         push    %r12
1460 .cfi_push       %r12
1461         push    %r13
1462 .cfi_push       %r13
1463         push    %r14
1464 .cfi_push       %r14
1465         push    %r15
1466 .cfi_push       %r15
1467 .Lblocks_avx2_body:
1468
1469         mov     $len,%r15               # reassign $len
1470
1471         mov     0($ctx),$d1             # load hash value
1472         mov     8($ctx),$d2
1473         mov     16($ctx),$h2#d
1474
1475         mov     24($ctx),$r0            # load r
1476         mov     32($ctx),$s1
1477
1478         ################################# base 2^26 -> base 2^64
1479         mov     $d1#d,$h0#d
1480         and     \$`-1*(1<<31)`,$d1
1481         mov     $d2,$r1                 # borrow $r1
1482         mov     $d2#d,$h1#d
1483         and     \$`-1*(1<<31)`,$d2
1484
1485         shr     \$6,$d1
1486         shl     \$52,$r1
1487         add     $d1,$h0
1488         shr     \$12,$h1
1489         shr     \$18,$d2
1490         add     $r1,$h0
1491         adc     $d2,$h1
1492
1493         mov     $h2,$d1
1494         shl     \$40,$d1
1495         shr     \$24,$h2
1496         add     $d1,$h1
1497         adc     \$0,$h2                 # can be partially reduced...
1498
1499         mov     \$-4,$d2                # ... so reduce
1500         mov     $h2,$d1
1501         and     $h2,$d2
1502         shr     \$2,$d1
1503         and     \$3,$h2
1504         add     $d2,$d1                 # =*5
1505         add     $d1,$h0
1506         adc     \$0,$h1
1507         adc     \$0,$h2
1508
1509         mov     $s1,$r1
1510         mov     $s1,%rax
1511         shr     \$2,$s1
1512         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1513
1514 .Lbase2_26_pre_avx2:
1515         add     0($inp),$h0             # accumulate input
1516         adc     8($inp),$h1
1517         lea     16($inp),$inp
1518         adc     $padbit,$h2
1519         sub     \$16,%r15
1520
1521         call    __poly1305_block
1522         mov     $r1,%rax
1523
1524         test    \$63,%r15
1525         jnz     .Lbase2_26_pre_avx2
1526
1527         test    $padbit,$padbit         # if $padbit is zero,
1528         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1529
1530         ################################# base 2^64 -> base 2^26
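        # i.e., informally:
        #
        #       l0 = h0 & 0x3ffffff
        #       l1 = (h0>>26) & 0x3ffffff
        #       l2 = ((h0>>52) | (h1<<12)) & 0x3ffffff
        #       l3 = (h1>>14) & 0x3ffffff
        #       l4 = (h1>>40) | (h2<<24)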
1531         mov     $h0,%rax
1532         mov     $h0,%rdx
1533         shr     \$52,$h0
1534         mov     $h1,$r0
1535         mov     $h1,$r1
1536         shr     \$26,%rdx
1537         and     \$0x3ffffff,%rax        # h[0]
1538         shl     \$12,$r0
1539         and     \$0x3ffffff,%rdx        # h[1]
1540         shr     \$14,$h1
1541         or      $r0,$h0
1542         shl     \$24,$h2
1543         and     \$0x3ffffff,$h0         # h[2]
1544         shr     \$40,$r1
1545         and     \$0x3ffffff,$h1         # h[3]
1546         or      $r1,$h2                 # h[4]
1547
1548         test    %r15,%r15
1549         jz      .Lstore_base2_26_avx2
1550
1551         vmovd   %rax#d,%x#$H0
1552         vmovd   %rdx#d,%x#$H1
1553         vmovd   $h0#d,%x#$H2
1554         vmovd   $h1#d,%x#$H3
1555         vmovd   $h2#d,%x#$H4
1556         jmp     .Lproceed_avx2
1557
1558 .align  32
1559 .Lstore_base2_64_avx2:
1560         mov     $h0,0($ctx)
1561         mov     $h1,8($ctx)
1562         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1563         jmp     .Ldone_avx2
1564
1565 .align  16
1566 .Lstore_base2_26_avx2:
1567         mov     %rax#d,0($ctx)          # store hash value base 2^26
1568         mov     %rdx#d,4($ctx)
1569         mov     $h0#d,8($ctx)
1570         mov     $h1#d,12($ctx)
1571         mov     $h2#d,16($ctx)
1572 .align  16
1573 .Ldone_avx2:
1574         mov     0(%rsp),%r15
1575 .cfi_restore    %r15
1576         mov     8(%rsp),%r14
1577 .cfi_restore    %r14
1578         mov     16(%rsp),%r13
1579 .cfi_restore    %r13
1580         mov     24(%rsp),%r12
1581 .cfi_restore    %r12
1582         mov     32(%rsp),%rbp
1583 .cfi_restore    %rbp
1584         mov     40(%rsp),%rbx
1585 .cfi_restore    %rbx
1586         lea     48(%rsp),%rsp
1587 .cfi_adjust_cfa_offset  -48
1588 .Lno_data_avx2:
1589 .Lblocks_avx2_epilogue:
1590         ret
1591 .cfi_endproc
1592
1593 .align  32
1594 .Lbase2_64_avx2:
1595 .cfi_startproc
1596         push    %rbx
1597 .cfi_push       %rbx
1598         push    %rbp
1599 .cfi_push       %rbp
1600         push    %r12
1601 .cfi_push       %r12
1602         push    %r13
1603 .cfi_push       %r13
1604         push    %r14
1605 .cfi_push       %r14
1606         push    %r15
1607 .cfi_push       %r15
1608 .Lbase2_64_avx2_body:
1609
1610         mov     $len,%r15               # reassign $len
1611
1612         mov     24($ctx),$r0            # load r
1613         mov     32($ctx),$s1
1614
1615         mov     0($ctx),$h0             # load hash value
1616         mov     8($ctx),$h1
1617         mov     16($ctx),$h2#d
1618
1619         mov     $s1,$r1
1620         mov     $s1,%rax
1621         shr     \$2,$s1
1622         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1623
1624         test    \$63,$len
1625         jz      .Linit_avx2
1626
1627 .Lbase2_64_pre_avx2:
1628         add     0($inp),$h0             # accumulate input
1629         adc     8($inp),$h1
1630         lea     16($inp),$inp
1631         adc     $padbit,$h2
1632         sub     \$16,%r15
1633
1634         call    __poly1305_block
1635         mov     $r1,%rax
1636
1637         test    \$63,%r15
1638         jnz     .Lbase2_64_pre_avx2
1639
1640 .Linit_avx2:
1641         ################################# base 2^64 -> base 2^26
1642         mov     $h0,%rax
1643         mov     $h0,%rdx
1644         shr     \$52,$h0
1645         mov     $h1,$d1
1646         mov     $h1,$d2
1647         shr     \$26,%rdx
1648         and     \$0x3ffffff,%rax        # h[0]
1649         shl     \$12,$d1
1650         and     \$0x3ffffff,%rdx        # h[1]
1651         shr     \$14,$h1
1652         or      $d1,$h0
1653         shl     \$24,$h2
1654         and     \$0x3ffffff,$h0         # h[2]
1655         shr     \$40,$d2
1656         and     \$0x3ffffff,$h1         # h[3]
1657         or      $d2,$h2                 # h[4]
1658
1659         vmovd   %rax#d,%x#$H0
1660         vmovd   %rdx#d,%x#$H1
1661         vmovd   $h0#d,%x#$H2
1662         vmovd   $h1#d,%x#$H3
1663         vmovd   $h2#d,%x#$H4
1664         movl    \$1,20($ctx)            # set is_base2_26
1665
1666         call    __poly1305_init_avx
1667
1668 .Lproceed_avx2:
1669         mov     %r15,$len                       # restore $len
1670         mov     OPENSSL_ia32cap_P+8(%rip),%r10d
1671         mov     \$`(1<<31|1<<30|1<<16)`,%r11d
1672
1673         mov     0(%rsp),%r15
1674 .cfi_restore    %r15
1675         mov     8(%rsp),%r14
1676 .cfi_restore    %r14
1677         mov     16(%rsp),%r13
1678 .cfi_restore    %r13
1679         mov     24(%rsp),%r12
1680 .cfi_restore    %r12
1681         mov     32(%rsp),%rbp
1682 .cfi_restore    %rbp
1683         mov     40(%rsp),%rbx
1684 .cfi_restore    %rbx
1685         lea     48(%rsp),%rax
1686         lea     48(%rsp),%rsp
1687 .cfi_adjust_cfa_offset  -48
1688 .Lbase2_64_avx2_epilogue:
1689         jmp     .Ldo_avx2
1690 .cfi_endproc
1691
1692 .align  32
1693 .Leven_avx2:
1694 .cfi_startproc
1695         mov             OPENSSL_ia32cap_P+8(%rip),%r10d
1696         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1697         vmovd           4*1($ctx),%x#$H1
1698         vmovd           4*2($ctx),%x#$H2
1699         vmovd           4*3($ctx),%x#$H3
1700         vmovd           4*4($ctx),%x#$H4
1701
1702 .Ldo_avx2:
1703 ___
1704 $code.=<<___            if ($avx>2);
1705         cmp             \$512,$len
1706         jb              .Lskip_avx512
1707         and             %r11d,%r10d
1708         test            \$`1<<16`,%r10d         # check for AVX512F
1709         jnz             .Lblocks_avx512
1710 .Lskip_avx512:
1711 ___
1712 $code.=<<___    if (!$win64);
1713         lea             -8(%rsp),%r11
1714 .cfi_def_cfa            %r11,16
1715         sub             \$0x128,%rsp
1716 ___
1717 $code.=<<___    if ($win64);
1718         lea             -0xf8(%rsp),%r11
1719         sub             \$0x1c8,%rsp
1720         vmovdqa         %xmm6,0x50(%r11)
1721         vmovdqa         %xmm7,0x60(%r11)
1722         vmovdqa         %xmm8,0x70(%r11)
1723         vmovdqa         %xmm9,0x80(%r11)
1724         vmovdqa         %xmm10,0x90(%r11)
1725         vmovdqa         %xmm11,0xa0(%r11)
1726         vmovdqa         %xmm12,0xb0(%r11)
1727         vmovdqa         %xmm13,0xc0(%r11)
1728         vmovdqa         %xmm14,0xd0(%r11)
1729         vmovdqa         %xmm15,0xe0(%r11)
1730 .Ldo_avx2_body:
1731 ___
1732 $code.=<<___;
1733         lea             .Lconst(%rip),%rcx
1734         lea             48+64($ctx),$ctx        # size optimization
1735         vmovdqa         96(%rcx),$T0            # .Lpermd_avx2
1736
1737         # expand and copy pre-calculated table to stack
1738         vmovdqu         `16*0-64`($ctx),%x#$T2
1739         and             \$-512,%rsp
1740         vmovdqu         `16*1-64`($ctx),%x#$T3
1741         vmovdqu         `16*2-64`($ctx),%x#$T4
1742         vmovdqu         `16*3-64`($ctx),%x#$D0
1743         vmovdqu         `16*4-64`($ctx),%x#$D1
1744         vmovdqu         `16*5-64`($ctx),%x#$D2
1745         lea             0x90(%rsp),%rax         # size optimization
1746         vmovdqu         `16*6-64`($ctx),%x#$D3
1747         vpermd          $T2,$T0,$T2             # 00003412 -> 14243444
1748         vmovdqu         `16*7-64`($ctx),%x#$D4
1749         vpermd          $T3,$T0,$T3
1750         vmovdqu         `16*8-64`($ctx),%x#$MASK
1751         vpermd          $T4,$T0,$T4
1752         vmovdqa         $T2,0x00(%rsp)
1753         vpermd          $D0,$T0,$D0
1754         vmovdqa         $T3,0x20-0x90(%rax)
1755         vpermd          $D1,$T0,$D1
1756         vmovdqa         $T4,0x40-0x90(%rax)
1757         vpermd          $D2,$T0,$D2
1758         vmovdqa         $D0,0x60-0x90(%rax)
1759         vpermd          $D3,$T0,$D3
1760         vmovdqa         $D1,0x80-0x90(%rax)
1761         vpermd          $D4,$T0,$D4
1762         vmovdqa         $D2,0xa0-0x90(%rax)
1763         vpermd          $MASK,$T0,$MASK
1764         vmovdqa         $D3,0xc0-0x90(%rax)
1765         vmovdqa         $D4,0xe0-0x90(%rax)
1766         vmovdqa         $MASK,0x100-0x90(%rax)
1767         vmovdqa         64(%rcx),$MASK          # .Lmask26
1768
1769         ################################################################
1770         # load input
1771         vmovdqu         16*0($inp),%x#$T0
1772         vmovdqu         16*1($inp),%x#$T1
1773         vinserti128     \$1,16*2($inp),$T0,$T0
1774         vinserti128     \$1,16*3($inp),$T1,$T1
1775         lea             16*4($inp),$inp
1776
1777         vpsrldq         \$6,$T0,$T2             # splat input
1778         vpsrldq         \$6,$T1,$T3
1779         vpunpckhqdq     $T1,$T0,$T4             # 4
1780         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1781         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1782
1783         vpsrlq          \$30,$T2,$T3
1784         vpsrlq          \$4,$T2,$T2
1785         vpsrlq          \$26,$T0,$T1
1786         vpsrlq          \$40,$T4,$T4            # 4
1787         vpand           $MASK,$T2,$T2           # 2
1788         vpand           $MASK,$T0,$T0           # 0
1789         vpand           $MASK,$T1,$T1           # 1
1790         vpand           $MASK,$T3,$T3           # 3
1791         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1792
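        # Informally, each 16-byte block m (plus its implicit 2^128 pad bit)
        # has just been split into five 26-bit limbs, one block per 64-bit
        # lane, roughly:
        #
        #       t0 = m & 0x3ffffff, t1 = (m>>26) & 0x3ffffff, ...,
        #       t4 = (m>>104) | (1<<24)         # 2^128 pad becomes bit 24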
1793         vpaddq          $H2,$T2,$H2             # accumulate input
1794         sub             \$64,$len
1795         jz              .Ltail_avx2
1796         jmp             .Loop_avx2
1797
1798 .align  32
1799 .Loop_avx2:
1800         ################################################################
1801         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1802         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1803         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1804         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1805         #   \________/\__________/
1806         ################################################################
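        # Summing the four per-lane polynomials above gives the plain serial
        # Horner evaluation m[0]*r^12 + m[1]*r^11 + ... + m[11]*r^1 (shown
        # here for 12 blocks): the loop advances every lane by r^4 per
        # iteration, and the per-lane factors r^4..r^1 are applied only in
        # .Ltail_avx2, after which the lanes are added horizontally.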
1807         #vpaddq         $H2,$T2,$H2             # accumulate input
1808         vpaddq          $H0,$T0,$H0
1809         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1810         vpaddq          $H1,$T1,$H1
1811         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1812         vpaddq          $H3,$T3,$H3
1813         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1814         vpaddq          $H4,$T4,$H4
1815         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1816         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1817
1818         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1819         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1820         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1821         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1822         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1823         #
1824         # however, as h2 is "chronologically" the first one available, pull the
1825         # corresponding operations up, so it's
1826         #
1827         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1828         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1829         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1830         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1831         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
1832
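        # The s-terms are sN = 5*rN: a limb product that lands at or above
        # 2^130 is folded back using 2^130 = 5 (mod 2^130-5); e.g. h4*r1
        # sits at weight 2^104 * 2^26 = 2^130 and therefore shows up in d0
        # as h4*5*r1.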
1833         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1834         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1835         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1836         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1837         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1838
1839         vpmuludq        $H0,$T1,$T4             # h0*r1
1840         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1841         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1842         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1843         vpmuludq        $H3,$T1,$T4             # h3*r1
1844         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1845         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1846         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1847          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1848
1849         vpmuludq        $H0,$T0,$T4             # h0*r0
1850         vpmuludq        $H1,$T0,$H2             # h1*r0
1851         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1852         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1853         vpmuludq        $H3,$T0,$T4             # h3*r0
1854         vpmuludq        $H4,$T0,$H2             # h4*r0
1855          vmovdqu        16*0($inp),%x#$T0       # load input
1856         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1857         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1858          vinserti128    \$1,16*2($inp),$T0,$T0
1859
1860         vpmuludq        $H3,$T1,$T4             # h3*s2
1861         vpmuludq        $H4,$T1,$H2             # h4*s2
1862          vmovdqu        16*1($inp),%x#$T1
1863         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1864         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1865          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1866         vpmuludq        $H1,$T2,$T4             # h1*r2
1867         vpmuludq        $H0,$T2,$T2             # h0*r2
1868         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1869         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1870          vinserti128    \$1,16*3($inp),$T1,$T1
1871          lea            16*4($inp),$inp
1872
1873         vpmuludq        $H1,$H2,$T4             # h1*r3
1874         vpmuludq        $H0,$H2,$H2             # h0*r3
1875          vpsrldq        \$6,$T0,$T2             # splat input
1876         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1877         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1878         vpmuludq        $H3,$T3,$T4             # h3*s3
1879         vpmuludq        $H4,$T3,$H2             # h4*s3
1880          vpsrldq        \$6,$T1,$T3
1881         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1882         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1883          vpunpckhqdq    $T1,$T0,$T4             # 4
1884
1885         vpmuludq        $H3,$S4,$H3             # h3*s4
1886         vpmuludq        $H4,$S4,$H4             # h4*s4
1887          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1888         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1889         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1890          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1891         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1892         vpmuludq        $H1,$S4,$H0             # h1*s4
1893         vmovdqa         64(%rcx),$MASK          # .Lmask26
1894         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1895         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1896
1897         ################################################################
1898         # lazy reduction (interleaved with tail of input splat)
1899
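        # Informal sketch of one carry step below (with M = 2^26-1):
        #
        #       c = h3>>26; h3 &= M; h4 += c;           # h3 -> h4
        #
        # and likewise for the other limbs, except that the h4 -> h0 carry
        # is multiplied by 5 (as c + (c<<2)) because 2^130 = 5 mod 2^130-5.
        # Carries are only partially propagated, just enough to keep the
        # limbs small enough for the next round of 32-bit multiply-adds.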
1900         vpsrlq          \$26,$H3,$D3
1901         vpand           $MASK,$H3,$H3
1902         vpaddq          $D3,$H4,$H4             # h3 -> h4
1903
1904         vpsrlq          \$26,$H0,$D0
1905         vpand           $MASK,$H0,$H0
1906         vpaddq          $D0,$D1,$H1             # h0 -> h1
1907
1908         vpsrlq          \$26,$H4,$D4
1909         vpand           $MASK,$H4,$H4
1910
1911          vpsrlq         \$4,$T3,$T2
1912
1913         vpsrlq          \$26,$H1,$D1
1914         vpand           $MASK,$H1,$H1
1915         vpaddq          $D1,$H2,$H2             # h1 -> h2
1916
1917         vpaddq          $D4,$H0,$H0
1918         vpsllq          \$2,$D4,$D4
1919         vpaddq          $D4,$H0,$H0             # h4 -> h0
1920
1921          vpand          $MASK,$T2,$T2           # 2
1922          vpsrlq         \$26,$T0,$T1
1923
1924         vpsrlq          \$26,$H2,$D2
1925         vpand           $MASK,$H2,$H2
1926         vpaddq          $D2,$H3,$H3             # h2 -> h3
1927
1928          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1929          vpsrlq         \$30,$T3,$T3
1930
1931         vpsrlq          \$26,$H0,$D0
1932         vpand           $MASK,$H0,$H0
1933         vpaddq          $D0,$H1,$H1             # h0 -> h1
1934
1935          vpsrlq         \$40,$T4,$T4            # 4
1936
1937         vpsrlq          \$26,$H3,$D3
1938         vpand           $MASK,$H3,$H3
1939         vpaddq          $D3,$H4,$H4             # h3 -> h4
1940
1941          vpand          $MASK,$T0,$T0           # 0
1942          vpand          $MASK,$T1,$T1           # 1
1943          vpand          $MASK,$T3,$T3           # 3
1944          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1945
1946         sub             \$64,$len
1947         jnz             .Loop_avx2
1948
1949         .byte           0x66,0x90
1950 .Ltail_avx2:
1951         ################################################################
1952         # while the above multiplications were by r^4 in all lanes, in the last
1953         # iteration we multiply the least significant lane by r^4 and the most
1954         # significant one by r, so this is a copy of the above, except that
1955         # references to the precomputed table are displaced by 4...
1956
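        # I.e., informally, the tail computes
        #
        #       lane0*r^4 + lane1*r^3 + lane2*r^2 + lane3*r^1
        #
        # with the lane sum taken by the horizontal addition further down,
        # completing the Horner evaluation sketched at the top of .Loop_avx2.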
1957         #vpaddq         $H2,$T2,$H2             # accumulate input
1958         vpaddq          $H0,$T0,$H0
1959         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1960         vpaddq          $H1,$T1,$H1
1961         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1962         vpaddq          $H3,$T3,$H3
1963         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1964         vpaddq          $H4,$T4,$H4
1965         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1966         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1967
1968         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1969         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1970         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1971         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1972         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1973
1974         vpmuludq        $H0,$T1,$T4             # h0*r1
1975         vpmuludq        $H1,$T1,$H2             # h1*r1
1976         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1977         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1978         vpmuludq        $H3,$T1,$T4             # h3*r1
1979         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1980         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1981         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1982
1983         vpmuludq        $H0,$T0,$T4             # h0*r0
1984         vpmuludq        $H1,$T0,$H2             # h1*r0
1985         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1986          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1987         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1988         vpmuludq        $H3,$T0,$T4             # h3*r0
1989         vpmuludq        $H4,$T0,$H2             # h4*r0
1990         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1991         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1992
1993         vpmuludq        $H3,$T1,$T4             # h3*s2
1994         vpmuludq        $H4,$T1,$H2             # h4*s2
1995         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1996         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1997          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1998         vpmuludq        $H1,$T2,$T4             # h1*r2
1999         vpmuludq        $H0,$T2,$T2             # h0*r2
2000         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
2001         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
2002
2003         vpmuludq        $H1,$H2,$T4             # h1*r3
2004         vpmuludq        $H0,$H2,$H2             # h0*r3
2005         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
2006         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
2007         vpmuludq        $H3,$T3,$T4             # h3*s3
2008         vpmuludq        $H4,$T3,$H2             # h4*s3
2009         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
2010         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
2011
2012         vpmuludq        $H3,$S4,$H3             # h3*s4
2013         vpmuludq        $H4,$S4,$H4             # h4*s4
2014         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
2015         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
2016         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
2017         vpmuludq        $H1,$S4,$H0             # h1*s4
2018         vmovdqa         64(%rcx),$MASK          # .Lmask26
2019         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
2020         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
2021
2022         ################################################################
2023         # horizontal addition
2024
2025         vpsrldq         \$8,$D1,$T1
2026         vpsrldq         \$8,$H2,$T2
2027         vpsrldq         \$8,$H3,$T3
2028         vpsrldq         \$8,$H4,$T4
2029         vpsrldq         \$8,$H0,$T0
2030         vpaddq          $T1,$D1,$D1
2031         vpaddq          $T2,$H2,$H2
2032         vpaddq          $T3,$H3,$H3
2033         vpaddq          $T4,$H4,$H4
2034         vpaddq          $T0,$H0,$H0
2035
2036         vpermq          \$0x2,$H3,$T3
2037         vpermq          \$0x2,$H4,$T4
2038         vpermq          \$0x2,$H0,$T0
2039         vpermq          \$0x2,$D1,$T1
2040         vpermq          \$0x2,$H2,$T2
2041         vpaddq          $T3,$H3,$H3
2042         vpaddq          $T4,$H4,$H4
2043         vpaddq          $T0,$H0,$H0
2044         vpaddq          $T1,$D1,$D1
2045         vpaddq          $T2,$H2,$H2
2046
2047         ################################################################
2048         # lazy reduction
2049
2050         vpsrlq          \$26,$H3,$D3
2051         vpand           $MASK,$H3,$H3
2052         vpaddq          $D3,$H4,$H4             # h3 -> h4
2053
2054         vpsrlq          \$26,$H0,$D0
2055         vpand           $MASK,$H0,$H0
2056         vpaddq          $D0,$D1,$H1             # h0 -> h1
2057
2058         vpsrlq          \$26,$H4,$D4
2059         vpand           $MASK,$H4,$H4
2060
2061         vpsrlq          \$26,$H1,$D1
2062         vpand           $MASK,$H1,$H1
2063         vpaddq          $D1,$H2,$H2             # h1 -> h2
2064
2065         vpaddq          $D4,$H0,$H0
2066         vpsllq          \$2,$D4,$D4
2067         vpaddq          $D4,$H0,$H0             # h4 -> h0
2068
2069         vpsrlq          \$26,$H2,$D2
2070         vpand           $MASK,$H2,$H2
2071         vpaddq          $D2,$H3,$H3             # h2 -> h3
2072
2073         vpsrlq          \$26,$H0,$D0
2074         vpand           $MASK,$H0,$H0
2075         vpaddq          $D0,$H1,$H1             # h0 -> h1
2076
2077         vpsrlq          \$26,$H3,$D3
2078         vpand           $MASK,$H3,$H3
2079         vpaddq          $D3,$H4,$H4             # h3 -> h4
2080
2081         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2082         vmovd           %x#$H1,`4*1-48-64`($ctx)
2083         vmovd           %x#$H2,`4*2-48-64`($ctx)
2084         vmovd           %x#$H3,`4*3-48-64`($ctx)
2085         vmovd           %x#$H4,`4*4-48-64`($ctx)
2086 ___
2087 $code.=<<___    if ($win64);
2088         vmovdqa         0x50(%r11),%xmm6
2089         vmovdqa         0x60(%r11),%xmm7
2090         vmovdqa         0x70(%r11),%xmm8
2091         vmovdqa         0x80(%r11),%xmm9
2092         vmovdqa         0x90(%r11),%xmm10
2093         vmovdqa         0xa0(%r11),%xmm11
2094         vmovdqa         0xb0(%r11),%xmm12
2095         vmovdqa         0xc0(%r11),%xmm13
2096         vmovdqa         0xd0(%r11),%xmm14
2097         vmovdqa         0xe0(%r11),%xmm15
2098         lea             0xf8(%r11),%rsp
2099 .Ldo_avx2_epilogue:
2100 ___
2101 $code.=<<___    if (!$win64);
2102         lea             8(%r11),%rsp
2103 .cfi_def_cfa            %rsp,8
2104 ___
2105 $code.=<<___;
2106         vzeroupper
2107         ret
2108 .cfi_endproc
2109 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
2110 ___
2111 #######################################################################
2112 if ($avx>2) {
2113 # On entry we have input length divisible by 64. But since the inner loop
2114 # processes 128 bytes per iteration, cases when the length is not divisible
2115 # by 128 are handled by passing the tail 64 bytes to .Ltail_avx2. For this
2116 # reason the stack layout is kept identical to poly1305_blocks_avx2's. If not
2117 # for this tail, we wouldn't even have to allocate a stack frame...
2118
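# For instance (roughly): a 576-byte call, 576 = 4*128 + 64, pushes 512
# bytes through the AVX-512 code itself and hands the remaining 64 bytes
# to .Ltail_avx2, which is what forces the shared stack layout.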
2119 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
2120 my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
2121 my $PADBIT="%zmm30";
2122
2123 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));            # switch to %zmm domain
2124 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2125 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2126 map(s/%y/%z/,($MASK));
2127
2128 $code.=<<___;
2129 .type   poly1305_blocks_avx512,\@function,4
2130 .align  32
2131 poly1305_blocks_avx512:
2132 .cfi_startproc
2133 .Lblocks_avx512:
2134         mov             \$15,%eax
2135         kmovw           %eax,%k2
2136 ___
2137 $code.=<<___    if (!$win64);
2138         lea             -8(%rsp),%r11
2139 .cfi_def_cfa            %r11,16
2140         sub             \$0x128,%rsp
2141 ___
2142 $code.=<<___    if ($win64);
2143         lea             -0xf8(%rsp),%r11
2144         sub             \$0x1c8,%rsp
2145         vmovdqa         %xmm6,0x50(%r11)
2146         vmovdqa         %xmm7,0x60(%r11)
2147         vmovdqa         %xmm8,0x70(%r11)
2148         vmovdqa         %xmm9,0x80(%r11)
2149         vmovdqa         %xmm10,0x90(%r11)
2150         vmovdqa         %xmm11,0xa0(%r11)
2151         vmovdqa         %xmm12,0xb0(%r11)
2152         vmovdqa         %xmm13,0xc0(%r11)
2153         vmovdqa         %xmm14,0xd0(%r11)
2154         vmovdqa         %xmm15,0xe0(%r11)
2155 .Ldo_avx512_body:
2156 ___
2157 $code.=<<___;
2158         lea             .Lconst(%rip),%rcx
2159         lea             48+64($ctx),$ctx        # size optimization
2160         vmovdqa         96(%rcx),%y#$T2         # .Lpermd_avx2
2161
2162         # expand pre-calculated table
2163         vmovdqu32       `16*0-64`($ctx),${R0}{%k2}{z}
2164         and             \$-512,%rsp
2165         vmovdqu32       `16*1-64`($ctx),${R1}{%k2}{z}
2166         mov             \$0x20,%rax
2167         vmovdqu32       `16*2-64`($ctx),${S1}{%k2}{z}
2168         vmovdqu32       `16*3-64`($ctx),${R2}{%k2}{z}
2169         vmovdqu32       `16*4-64`($ctx),${S2}{%k2}{z}
2170         vmovdqu32       `16*5-64`($ctx),${R3}{%k2}{z}
2171         vmovdqu32       `16*6-64`($ctx),${S3}{%k2}{z}
2172         vmovdqu32       `16*7-64`($ctx),${R4}{%k2}{z}
2173         vmovdqu32       `16*8-64`($ctx),${S4}{%k2}{z}
2174         vpermd          $R0,$T2,$R0             # 00003412 -> 14243444
2175         vpbroadcastq    64(%rcx),$MASK          # .Lmask26
2176         vpermd          $R1,$T2,$R1
2177         vpermd          $S1,$T2,$S1
2178         vpermd          $R2,$T2,$R2
2179         vmovdqa64       $R0,0x00(%rsp){%k2}     # save in case $len%128 != 0
2180          vpsrlq         \$32,$R0,$T0            # 14243444 -> 01020304
2181         vpermd          $S2,$T2,$S2
2182         vmovdqu64       $R1,0x00(%rsp,%rax){%k2}
2183          vpsrlq         \$32,$R1,$T1
2184         vpermd          $R3,$T2,$R3
2185         vmovdqa64       $S1,0x40(%rsp){%k2}
2186         vpermd          $S3,$T2,$S3
2187         vpermd          $R4,$T2,$R4
2188         vmovdqu64       $R2,0x40(%rsp,%rax){%k2}
2189         vpermd          $S4,$T2,$S4
2190         vmovdqa64       $S2,0x80(%rsp){%k2}
2191         vmovdqu64       $R3,0x80(%rsp,%rax){%k2}
2192         vmovdqa64       $S3,0xc0(%rsp){%k2}
2193         vmovdqu64       $R4,0xc0(%rsp,%rax){%k2}
2194         vmovdqa64       $S4,0x100(%rsp){%k2}
2195
2196         ################################################################
2197         # calculate 5th through 8th powers of the key
2198         #
2199         # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2200         # d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2201         # d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
2202         # d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
2203         # d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0
2204
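        # This reuses the same limb arithmetic as the block loop, only with
        # key limbs in both operands: per 64-bit lane the limbs of r^1..r^4
        # are multiplied by the limbs of r^4, so after the reduction below
        # the lanes hold r^5..r^8 (the 5*r terms are, as before, the
        # 2^130 = 5 fold).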
2205         vpmuludq        $T0,$R0,$D0             # d0 = r0'*r0
2206         vpmuludq        $T0,$R1,$D1             # d1 = r0'*r1
2207         vpmuludq        $T0,$R2,$D2             # d2 = r0'*r2
2208         vpmuludq        $T0,$R3,$D3             # d3 = r0'*r3
2209         vpmuludq        $T0,$R4,$D4             # d4 = r0'*r4
2210          vpsrlq         \$32,$R2,$T2
2211
2212         vpmuludq        $T1,$S4,$M0
2213         vpmuludq        $T1,$R0,$M1
2214         vpmuludq        $T1,$R1,$M2
2215         vpmuludq        $T1,$R2,$M3
2216         vpmuludq        $T1,$R3,$M4
2217          vpsrlq         \$32,$R3,$T3
2218         vpaddq          $M0,$D0,$D0             # d0 += r1'*5*r4
2219         vpaddq          $M1,$D1,$D1             # d1 += r1'*r0
2220         vpaddq          $M2,$D2,$D2             # d2 += r1'*r1
2221         vpaddq          $M3,$D3,$D3             # d3 += r1'*r2
2222         vpaddq          $M4,$D4,$D4             # d4 += r1'*r3
2223
2224         vpmuludq        $T2,$S3,$M0
2225         vpmuludq        $T2,$S4,$M1
2226         vpmuludq        $T2,$R1,$M3
2227         vpmuludq        $T2,$R2,$M4
2228         vpmuludq        $T2,$R0,$M2
2229          vpsrlq         \$32,$R4,$T4
2230         vpaddq          $M0,$D0,$D0             # d0 += r2'*5*r3
2231         vpaddq          $M1,$D1,$D1             # d1 += r2'*5*r4
2232         vpaddq          $M3,$D3,$D3             # d3 += r2'*r1
2233         vpaddq          $M4,$D4,$D4             # d4 += r2'*r2
2234         vpaddq          $M2,$D2,$D2             # d2 += r2'*r0
2235
2236         vpmuludq        $T3,$S2,$M0
2237         vpmuludq        $T3,$R0,$M3
2238         vpmuludq        $T3,$R1,$M4
2239         vpmuludq        $T3,$S3,$M1
2240         vpmuludq        $T3,$S4,$M2
2241         vpaddq          $M0,$D0,$D0             # d0 += r3'*5*r2
2242         vpaddq          $M3,$D3,$D3             # d3 += r3'*r0
2243         vpaddq          $M4,$D4,$D4             # d4 += r3'*r1
2244         vpaddq          $M1,$D1,$D1             # d1 += r3'*5*r3
2245         vpaddq          $M2,$D2,$D2             # d2 += r3'*5*r4
2246
2247         vpmuludq        $T4,$S4,$M3
2248         vpmuludq        $T4,$R0,$M4
2249         vpmuludq        $T4,$S1,$M0
2250         vpmuludq        $T4,$S2,$M1
2251         vpmuludq        $T4,$S3,$M2
2252         vpaddq          $M3,$D3,$D3             # d3 += r2'*5*r4
2253         vpaddq          $M4,$D4,$D4             # d4 += r2'*r0
2254         vpaddq          $M0,$D0,$D0             # d0 += r2'*5*r1
2255         vpaddq          $M1,$D1,$D1             # d1 += r2'*5*r2
2256         vpaddq          $M2,$D2,$D2             # d2 += r2'*5*r3
2257
2258         ################################################################
2259         # load input
2260         vmovdqu64       16*0($inp),%z#$T3
2261         vmovdqu64       16*4($inp),%z#$T4
2262         lea             16*8($inp),$inp
2263
2264         ################################################################
2265         # lazy reduction
2266
2267         vpsrlq          \$26,$D3,$M3
2268         vpandq          $MASK,$D3,$D3
2269         vpaddq          $M3,$D4,$D4             # d3 -> d4
2270
2271         vpsrlq          \$26,$D0,$M0
2272         vpandq          $MASK,$D0,$D0
2273         vpaddq          $M0,$D1,$D1             # d0 -> d1
2274
2275         vpsrlq          \$26,$D4,$M4
2276         vpandq          $MASK,$D4,$D4
2277
2278         vpsrlq          \$26,$D1,$M1
2279         vpandq          $MASK,$D1,$D1
2280         vpaddq          $M1,$D2,$D2             # d1 -> d2
2281
2282         vpaddq          $M4,$D0,$D0
2283         vpsllq          \$2,$M4,$M4
2284         vpaddq          $M4,$D0,$D0             # d4 -> d0
2285
2286         vpsrlq          \$26,$D2,$M2
2287         vpandq          $MASK,$D2,$D2
2288         vpaddq          $M2,$D3,$D3             # d2 -> d3
2289
2290         vpsrlq          \$26,$D0,$M0
2291         vpandq          $MASK,$D0,$D0
2292         vpaddq          $M0,$D1,$D1             # d0 -> d1
2293
2294         vpsrlq          \$26,$D3,$M3
2295         vpandq          $MASK,$D3,$D3
2296         vpaddq          $M3,$D4,$D4             # d3 -> d4
2297
2298         ################################################################
2299         # at this point we have 14243444 in $R0-$S4 and 05060708 in
2300         # $D0-$D4, ...
2301
2302         vpunpcklqdq     $T4,$T3,$T0     # transpose input
2303         vpunpckhqdq     $T4,$T3,$T4
2304
2305         # ... since the input 64-bit lanes are ordered as 73625140, we could
2306         # "vperm" them to 76543210 (here and in each loop iteration), *or*
2307         # we could just go with the flow; hence the goal for $R0-$S4 is
2308         # 1858286838784888 ...
2309
2310         vmovdqa32       128(%rcx),$M0           # .Lpermd_avx512:
2311         mov             \$0x7777,%eax
2312         kmovw           %eax,%k1
2313
2314         vpermd          $R0,$M0,$R0             # 14243444 -> 1---2---3---4---
2315         vpermd          $R1,$M0,$R1
2316         vpermd          $R2,$M0,$R2
2317         vpermd          $R3,$M0,$R3
2318         vpermd          $R4,$M0,$R4
2319
2320         vpermd          $D0,$M0,${R0}{%k1}      # 05060708 -> 1858286838784888
2321         vpermd          $D1,$M0,${R1}{%k1}
2322         vpermd          $D2,$M0,${R2}{%k1}
2323         vpermd          $D3,$M0,${R3}{%k1}
2324         vpermd          $D4,$M0,${R4}{%k1}
2325
2326         vpslld          \$2,$R1,$S1             # *5
2327         vpslld          \$2,$R2,$S2
2328         vpslld          \$2,$R3,$S3
2329         vpslld          \$2,$R4,$S4
2330         vpaddd          $R1,$S1,$S1
2331         vpaddd          $R2,$S2,$S2
2332         vpaddd          $R3,$S3,$S3
2333         vpaddd          $R4,$S4,$S4
2334
2335         vpbroadcastq    32(%rcx),$PADBIT        # .L129
2336
2337         vpsrlq          \$52,$T0,$T2            # splat input
2338         vpsllq          \$12,$T4,$T3
2339         vporq           $T3,$T2,$T2
2340         vpsrlq          \$26,$T0,$T1
2341         vpsrlq          \$14,$T4,$T3
2342         vpsrlq          \$40,$T4,$T4            # 4
2343         vpandq          $MASK,$T2,$T2           # 2
2344         vpandq          $MASK,$T0,$T0           # 0
2345         #vpandq         $MASK,$T1,$T1           # 1
2346         #vpandq         $MASK,$T3,$T3           # 3
2347         #vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2348
2349         vpaddq          $H2,$T2,$H2             # accumulate input
2350         sub             \$192,$len
2351         jbe             .Ltail_avx512
2352         jmp             .Loop_avx512
2353
2354 .align  32
2355 .Loop_avx512:
2356         ################################################################
2357         # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2358         # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2359         # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2360         # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2361         # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2362         # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2363         # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2364         # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2365         #   \________/\___________/
2366         ################################################################
2367         #vpaddq         $H2,$T2,$H2             # accumulate input
2368
2369         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
2370         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
2371         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
2372         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
2373         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2374         #
2375         # however, as h2 is "chronologically" the first one available, pull the
2376         # corresponding operations up, so it's
2377         #
2378         # d3 = h2*r1   + h0*r3 + h1*r2   + h3*r0 + h4*5*r4
2379         # d4 = h2*r2   + h0*r4 + h1*r3   + h3*r1 + h4*r0
2380         # d0 = h2*5*r3 + h0*r0 + h1*5*r4         + h3*5*r2 + h4*5*r1
2381         # d1 = h2*5*r4 + h0*r1           + h1*r0 + h3*5*r3 + h4*5*r2
2382         # d2 = h2*r0           + h0*r2   + h1*r1 + h3*5*r4 + h4*5*r3
2383
2384         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2385          vpaddq         $H0,$T0,$H0
2386         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2387          vpandq         $MASK,$T1,$T1           # 1
2388         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2389          vpandq         $MASK,$T3,$T3           # 3
2390         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2391          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2392         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2393          vpaddq         $H1,$T1,$H1             # accumulate input
2394          vpaddq         $H3,$T3,$H3
2395          vpaddq         $H4,$T4,$H4
2396
2397           vmovdqu64     16*0($inp),$T3          # load input
2398           vmovdqu64     16*4($inp),$T4
2399           lea           16*8($inp),$inp
2400         vpmuludq        $H0,$R3,$M3
2401         vpmuludq        $H0,$R4,$M4
2402         vpmuludq        $H0,$R0,$M0
2403         vpmuludq        $H0,$R1,$M1
2404         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2405         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2406         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2407         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2408
2409         vpmuludq        $H1,$R2,$M3
2410         vpmuludq        $H1,$R3,$M4
2411         vpmuludq        $H1,$S4,$M0
2412         vpmuludq        $H0,$R2,$M2
2413         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2414         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2415         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2416         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2417
2418           vpunpcklqdq   $T4,$T3,$T0             # transpose input
2419           vpunpckhqdq   $T4,$T3,$T4
2420
2421         vpmuludq        $H3,$R0,$M3
2422         vpmuludq        $H3,$R1,$M4
2423         vpmuludq        $H1,$R0,$M1
2424         vpmuludq        $H1,$R1,$M2
2425         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2426         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2427         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2428         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2429
2430         vpmuludq        $H4,$S4,$M3
2431         vpmuludq        $H4,$R0,$M4
2432         vpmuludq        $H3,$S2,$M0
2433         vpmuludq        $H3,$S3,$M1
2434         vpaddq          $M3,$D3,$D3             # d3 += h4*s4
2435         vpmuludq        $H3,$S4,$M2
2436         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2437         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2438         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2439         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2440
2441         vpmuludq        $H4,$S1,$M0
2442         vpmuludq        $H4,$S2,$M1
2443         vpmuludq        $H4,$S3,$M2
2444         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2445         vpaddq          $M1,$D1,$H1             # h1 = d2 + h4*s2
2446         vpaddq          $M2,$D2,$H2             # h2 = d3 + h4*s3
2447
2448         ################################################################
2449         # lazy reduction (interleaved with input splat)
2450
2451          vpsrlq         \$52,$T0,$T2            # splat input
2452          vpsllq         \$12,$T4,$T3
2453
2454         vpsrlq          \$26,$D3,$H3
2455         vpandq          $MASK,$D3,$D3
2456         vpaddq          $H3,$D4,$H4             # h3 -> h4
2457
2458          vporq          $T3,$T2,$T2
2459
2460         vpsrlq          \$26,$H0,$D0
2461         vpandq          $MASK,$H0,$H0
2462         vpaddq          $D0,$H1,$H1             # h0 -> h1
2463
2464          vpandq         $MASK,$T2,$T2           # 2
2465
2466         vpsrlq          \$26,$H4,$D4
2467         vpandq          $MASK,$H4,$H4
2468
2469         vpsrlq          \$26,$H1,$D1
2470         vpandq          $MASK,$H1,$H1
2471         vpaddq          $D1,$H2,$H2             # h1 -> h2
2472
2473         vpaddq          $D4,$H0,$H0
2474         vpsllq          \$2,$D4,$D4
2475         vpaddq          $D4,$H0,$H0             # h4 -> h0
2476
2477          vpaddq         $T2,$H2,$H2             # modulo-scheduled
2478          vpsrlq         \$26,$T0,$T1
2479
2480         vpsrlq          \$26,$H2,$D2
2481         vpandq          $MASK,$H2,$H2
2482         vpaddq          $D2,$D3,$H3             # h2 -> h3
2483
2484          vpsrlq         \$14,$T4,$T3
2485
2486         vpsrlq          \$26,$H0,$D0
2487         vpandq          $MASK,$H0,$H0
2488         vpaddq          $D0,$H1,$H1             # h0 -> h1
2489
2490          vpsrlq         \$40,$T4,$T4            # 4
2491
2492         vpsrlq          \$26,$H3,$D3
2493         vpandq          $MASK,$H3,$H3
2494         vpaddq          $D3,$H4,$H4             # h3 -> h4
2495
2496          vpandq         $MASK,$T0,$T0           # 0
2497          #vpandq        $MASK,$T1,$T1           # 1
2498          #vpandq        $MASK,$T3,$T3           # 3
2499          #vporq         $PADBIT,$T4,$T4         # padbit, yes, always
2500
2501         sub             \$128,$len
2502         ja              .Loop_avx512
2503
2504 .Ltail_avx512:
2505         ################################################################
2506         # while the above multiplications were by r^8 in all lanes, in the last
2507         # iteration we multiply the least significant lane by r^8 and the most
2508         # significant one by r, which is why the table gets shifted...
2509
2510         vpsrlq          \$32,$R0,$R0            # 0105020603070408
2511         vpsrlq          \$32,$R1,$R1
2512         vpsrlq          \$32,$R2,$R2
2513         vpsrlq          \$32,$S3,$S3
2514         vpsrlq          \$32,$S4,$S4
2515         vpsrlq          \$32,$R3,$R3
2516         vpsrlq          \$32,$R4,$R4
2517         vpsrlq          \$32,$S1,$S1
2518         vpsrlq          \$32,$S2,$S2
2519
2520         ################################################################
2521         # load either the next or the last 64 bytes of input
2522         lea             ($inp,$len),$inp
2523
2524         #vpaddq         $H2,$T2,$H2             # accumulate input
2525         vpaddq          $H0,$T0,$H0
2526
2527         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2528         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2529         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2530          vpandq         $MASK,$T1,$T1           # 1
2531         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2532          vpandq         $MASK,$T3,$T3           # 3
2533         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2534          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2535          vpaddq         $H1,$T1,$H1             # accumulate input
2536          vpaddq         $H3,$T3,$H3
2537          vpaddq         $H4,$T4,$H4
2538
2539           vmovdqu       16*0($inp),%x#$T0
2540         vpmuludq        $H0,$R3,$M3
2541         vpmuludq        $H0,$R4,$M4
2542         vpmuludq        $H0,$R0,$M0
2543         vpmuludq        $H0,$R1,$M1
2544         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2545         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2546         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2547         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2548
2549           vmovdqu       16*1($inp),%x#$T1
2550         vpmuludq        $H1,$R2,$M3
2551         vpmuludq        $H1,$R3,$M4
2552         vpmuludq        $H1,$S4,$M0
2553         vpmuludq        $H0,$R2,$M2
2554         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2555         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2556         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2557         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2558
2559           vinserti128   \$1,16*2($inp),%y#$T0,%y#$T0
2560         vpmuludq        $H3,$R0,$M3
2561         vpmuludq        $H3,$R1,$M4
2562         vpmuludq        $H1,$R0,$M1
2563         vpmuludq        $H1,$R1,$M2
2564         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2565         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2566         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2567         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2568
2569           vinserti128   \$1,16*3($inp),%y#$T1,%y#$T1
2570         vpmuludq        $H4,$S4,$M3
2571         vpmuludq        $H4,$R0,$M4
2572         vpmuludq        $H3,$S2,$M0
2573         vpmuludq        $H3,$S3,$M1
2574         vpmuludq        $H3,$S4,$M2
2575         vpaddq          $M3,$D3,$H3             # h3 = d3 + h4*s4
2576         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2577         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2578         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2579         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2580
2581         vpmuludq        $H4,$S1,$M0
2582         vpmuludq        $H4,$S2,$M1
2583         vpmuludq        $H4,$S3,$M2
2584         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2585         vpaddq          $M1,$D1,$H1             # h1 = d2 + h4*s2
2586         vpaddq          $M2,$D2,$H2             # h2 = d3 + h4*s3
2587
2588         ################################################################
2589         # horizontal addition
2590
2591         mov             \$1,%eax
2592         vpermq          \$0xb1,$H3,$D3
2593         vpermq          \$0xb1,$D4,$H4
2594         vpermq          \$0xb1,$H0,$D0
2595         vpermq          \$0xb1,$H1,$D1
2596         vpermq          \$0xb1,$H2,$D2
2597         vpaddq          $D3,$H3,$H3
2598         vpaddq          $D4,$H4,$H4
2599         vpaddq          $D0,$H0,$H0
2600         vpaddq          $D1,$H1,$H1
2601         vpaddq          $D2,$H2,$H2
2602
2603         kmovw           %eax,%k3
2604         vpermq          \$0x2,$H3,$D3
2605         vpermq          \$0x2,$H4,$D4
2606         vpermq          \$0x2,$H0,$D0
2607         vpermq          \$0x2,$H1,$D1
2608         vpermq          \$0x2,$H2,$D2
2609         vpaddq          $D3,$H3,$H3
2610         vpaddq          $D4,$H4,$H4
2611         vpaddq          $D0,$H0,$H0
2612         vpaddq          $D1,$H1,$H1
2613         vpaddq          $D2,$H2,$H2
2614
2615         vextracti64x4   \$0x1,$H3,%y#$D3
2616         vextracti64x4   \$0x1,$H4,%y#$D4
2617         vextracti64x4   \$0x1,$H0,%y#$D0
2618         vextracti64x4   \$0x1,$H1,%y#$D1
2619         vextracti64x4   \$0x1,$H2,%y#$D2
2620         vpaddq          $D3,$H3,${H3}{%k3}{z}   # keep single qword in case
2621         vpaddq          $D4,$H4,${H4}{%k3}{z}   # it's passed to .Ltail_avx2
2622         vpaddq          $D0,$H0,${H0}{%k3}{z}
2623         vpaddq          $D1,$H1,${H1}{%k3}{z}
2624         vpaddq          $D2,$H2,${H2}{%k3}{z}
2625 ___
2626 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2627 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2628 $code.=<<___;
2629         ################################################################
2630         # lazy reduction (interleaved with input splat)
2631
2632         vpsrlq          \$26,$H3,$D3
2633         vpand           $MASK,$H3,$H3
2634          vpsrldq        \$6,$T0,$T2             # splat input
2635          vpsrldq        \$6,$T1,$T3
2636          vpunpckhqdq    $T1,$T0,$T4             # 4
2637         vpaddq          $D3,$H4,$H4             # h3 -> h4
2638
2639         vpsrlq          \$26,$H0,$D0
2640         vpand           $MASK,$H0,$H0
2641          vpunpcklqdq    $T3,$T2,$T2             # 2:3
2642          vpunpcklqdq    $T1,$T0,$T0             # 0:1
2643         vpaddq          $D0,$H1,$H1             # h0 -> h1
2644
2645         vpsrlq          \$26,$H4,$D4
2646         vpand           $MASK,$H4,$H4
2647
2648         vpsrlq          \$26,$H1,$D1
2649         vpand           $MASK,$H1,$H1
2650          vpsrlq         \$30,$T2,$T3
2651          vpsrlq         \$4,$T2,$T2
2652         vpaddq          $D1,$H2,$H2             # h1 -> h2
2653
2654         vpaddq          $D4,$H0,$H0
2655         vpsllq          \$2,$D4,$D4
2656          vpsrlq         \$26,$T0,$T1
2657          vpsrlq         \$40,$T4,$T4            # 4
2658         vpaddq          $D4,$H0,$H0             # h4 -> h0
2659
2660         vpsrlq          \$26,$H2,$D2
2661         vpand           $MASK,$H2,$H2
2662          vpand          $MASK,$T2,$T2           # 2
2663          vpand          $MASK,$T0,$T0           # 0
2664         vpaddq          $D2,$H3,$H3             # h2 -> h3
2665
2666         vpsrlq          \$26,$H0,$D0
2667         vpand           $MASK,$H0,$H0
2668          vpaddq         $H2,$T2,$H2             # accumulate input for .Ltail_avx2
2669          vpand          $MASK,$T1,$T1           # 1
2670         vpaddq          $D0,$H1,$H1             # h0 -> h1
2671
2672         vpsrlq          \$26,$H3,$D3
2673         vpand           $MASK,$H3,$H3
2674          vpand          $MASK,$T3,$T3           # 3
2675          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
2676         vpaddq          $D3,$H4,$H4             # h3 -> h4
2677
2678         lea             0x90(%rsp),%rax         # size optimization for .Ltail_avx2
2679         add             \$64,$len
2680         jnz             .Ltail_avx2
2681
2682         vpsubq          $T2,$H2,$H2             # undo input accumulation
2683         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2684         vmovd           %x#$H1,`4*1-48-64`($ctx)
2685         vmovd           %x#$H2,`4*2-48-64`($ctx)
2686         vmovd           %x#$H3,`4*3-48-64`($ctx)
2687         vmovd           %x#$H4,`4*4-48-64`($ctx)
2688         vzeroall
2689 ___
2690 $code.=<<___    if ($win64);
2691         movdqa          0x50(%r11),%xmm6
2692         movdqa          0x60(%r11),%xmm7
2693         movdqa          0x70(%r11),%xmm8
2694         movdqa          0x80(%r11),%xmm9
2695         movdqa          0x90(%r11),%xmm10
2696         movdqa          0xa0(%r11),%xmm11
2697         movdqa          0xb0(%r11),%xmm12
2698         movdqa          0xc0(%r11),%xmm13
2699         movdqa          0xd0(%r11),%xmm14
2700         movdqa          0xe0(%r11),%xmm15
2701         lea             0xf8(%r11),%rsp
2702 .Ldo_avx512_epilogue:
2703 ___
2704 $code.=<<___    if (!$win64);
2705         lea             8(%r11),%rsp
2706 .cfi_def_cfa            %rsp,8
2707 ___
2708 $code.=<<___;
2709         ret
2710 .cfi_endproc
2711 .size   poly1305_blocks_avx512,.-poly1305_blocks_avx512
2712 ___
2713 if ($avx>3) {
2714 ########################################################################
2715 # VPMADD52 version using 2^44 radix.
2716 #
2717 # One can argue that base 2^52 would be more natural. Well, even though
2718 # some operations would be more natural, one has to recognize a couple of
2719 # things. Base 2^52 doesn't provide an advantage over base 2^44 if you look
2720 # at the amount of multiply-n-accumulate operations. Secondly, it makes it
2721 # impossible to pre-compute multiples of 5 [referred to as s[]/sN in
2722 # reference implementations], which means that more such operations
2723 # would have to be performed in the inner loop, which in turn makes the
2724 # critical path longer. In other words, even though base 2^44 reduction
2725 # might look less elegant, the overall critical path is actually shorter...
2726
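# As a rough sketch: in base 2^44 a 130-bit value is held as three limbs of
# 44+44+42 bits, and limb products landing at or above 2^132 fold back with
# a factor of 20, since 2^132 = 4*2^130 = 4*5 = 20 (mod 2^130-5), which is
# why s[] below is the key value times 20 rather than times 5.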
2727 ########################################################################
2728 # The layout of the opaque area is as follows.
2729 #
2730 #       unsigned __int64 h[3];          # current hash value base 2^44
2731 #       unsigned __int64 s[2];          # key value*20 base 2^44
2732 #       unsigned __int64 r[3];          # key value base 2^44
2733 #       struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
2734 #                                       # r^n positions reflect
2735 #                                       # placement in register, not
2736 #                                       # memory, R[3] is R[1]*20
2737
2738 $code.=<<___;
2739 .type   poly1305_init_base2_44,\@function,3
2740 .align  32
2741 poly1305_init_base2_44:
2742         xor     %rax,%rax
2743         mov     %rax,0($ctx)            # initialize hash value
2744         mov     %rax,8($ctx)
2745         mov     %rax,16($ctx)
2746
2747 .Linit_base2_44:
2748         lea     poly1305_blocks_vpmadd52(%rip),%r10
2749         lea     poly1305_emit_base2_44(%rip),%r11
2750
2751         mov     \$0x0ffffffc0fffffff,%rax
2752         mov     \$0x0ffffffc0ffffffc,%rcx
2753         and     0($inp),%rax
2754         mov     \$0x00000fffffffffff,%r8
2755         and     8($inp),%rcx
2756         mov     \$0x00000fffffffffff,%r9
2757         and     %rax,%r8
2758         shrd    \$44,%rcx,%rax
2759         mov     %r8,40($ctx)            # r0
2760         and     %r9,%rax
2761         shr     \$24,%rcx
2762         mov     %rax,48($ctx)           # r1
2763         lea     (%rax,%rax,4),%rax      # *5
2764         mov     %rcx,56($ctx)           # r2
2765         shl     \$2,%rax                # magic <<2
2766         lea     (%rcx,%rcx,4),%rcx      # *5
2767         shl     \$2,%rcx                # magic <<2
2768         mov     %rax,24($ctx)           # s1
2769         mov     %rcx,32($ctx)           # s2
2770         movq    \$-1,64($ctx)           # write impossible value
2771 ___
2772 $code.=<<___    if ($flavour !~ /elf32/);
2773         mov     %r10,0(%rdx)
2774         mov     %r11,8(%rdx)
2775 ___
2776 $code.=<<___    if ($flavour =~ /elf32/);
2777         mov     %r10d,0(%rdx)
2778         mov     %r11d,4(%rdx)
2779 ___
2780 $code.=<<___;
2781         mov     \$1,%eax
2782         ret
2783 .size   poly1305_init_base2_44,.-poly1305_init_base2_44
2784 ___
2785 {
2786 my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
2787 my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
2788 my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));
2789
2790 $code.=<<___;
2791 .type   poly1305_blocks_vpmadd52,\@function,4
2792 .align  32
2793 poly1305_blocks_vpmadd52:
2794         shr     \$4,$len
2795         jz      .Lno_data_vpmadd52              # too short
2796
2797         shl     \$40,$padbit
2798         mov     64($ctx),%r8                    # peek on power of the key
2799
2800         # if powers of the key are not calculated yet, process up to 3
2801         # blocks with this single-block subroutine, otherwise ensure that
2802         # the remaining length is a multiple of 2 blocks and pass the rest
2803         # down to the next subroutine...
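        # (in other words: if the key powers are already set up or there are
        # at least 4 blocks, only len&1 blocks are processed here; otherwise
        # all len&3 blocks are, so the length handed to the 4x code below is
        # always a multiple of 2 blocks)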
2804
2805         mov     \$3,%rax
2806         mov     \$1,%r10
2807         cmp     \$4,$len                        # is input long
2808         cmovae  %r10,%rax
2809         test    %r8,%r8                         # is power value impossible?
2810         cmovns  %r10,%rax
2811
2812         and     $len,%rax                       # is input of favourable length?
2813         jz      .Lblocks_vpmadd52_4x
2814
2815         sub             %rax,$len
2816         mov             \$7,%r10d
2817         mov             \$1,%r11d
2818         kmovw           %r10d,%k7
2819         lea             .L2_44_inp_permd(%rip),%r10
2820         kmovw           %r11d,%k1
2821
2822         vmovq           $padbit,%x#$PAD
2823         vmovdqa64       0(%r10),$inp_permd      # .L2_44_inp_permd
2824         vmovdqa64       32(%r10),$inp_shift     # .L2_44_inp_shift
2825         vpermq          \$0xcf,$PAD,$PAD
2826         vmovdqa64       64(%r10),$reduc_mask    # .L2_44_mask
2827
2828         vmovdqu64       0($ctx),${Dlo}{%k7}{z}          # load hash value
2829         vmovdqu64       40($ctx),${r2r1r0}{%k7}{z}      # load keys
2830         vmovdqu64       32($ctx),${r1r0s2}{%k7}{z}
2831         vmovdqu64       24($ctx),${r0s2s1}{%k7}{z}
2832
2833         vmovdqa64       96(%r10),$reduc_rght    # .L2_44_shift_rgt
2834         vmovdqa64       128(%r10),$reduc_left   # .L2_44_shift_lft
2835
2836         jmp             .Loop_vpmadd52
2837
2838 .align  32
2839 .Loop_vpmadd52:
2840         vmovdqu32       0($inp),%x#$T0          # load input as ----3210
2841         lea             16($inp),$inp
2842
2843         vpermd          $T0,$inp_permd,$T0      # ----3210 -> --322110
2844         vpsrlvq         $inp_shift,$T0,$T0
2845         vpandq          $reduc_mask,$T0,$T0
2846         vporq           $PAD,$T0,$T0
2847
2848         vpaddq          $T0,$Dlo,$Dlo           # accumulate input
2849
2850         vpermq          \$0,$Dlo,${H0}{%k7}{z}  # smash hash value
2851         vpermq          \$0b01010101,$Dlo,${H1}{%k7}{z}
2852         vpermq          \$0b10101010,$Dlo,${H2}{%k7}{z}
2853
2854         vpxord          $Dlo,$Dlo,$Dlo
2855         vpxord          $Dhi,$Dhi,$Dhi
2856
2857         vpmadd52luq     $r2r1r0,$H0,$Dlo
2858         vpmadd52huq     $r2r1r0,$H0,$Dhi
2859
2860         vpmadd52luq     $r1r0s2,$H1,$Dlo
2861         vpmadd52huq     $r1r0s2,$H1,$Dhi
2862
2863         vpmadd52luq     $r0s2s1,$H2,$Dlo
2864         vpmadd52huq     $r0s2s1,$H2,$Dhi
2865
2866         vpsrlvq         $reduc_rght,$Dlo,$T0    # 0 in topmost qword
2867         vpsllvq         $reduc_left,$Dhi,$Dhi   # 0 in topmost qword
2868         vpandq          $reduc_mask,$Dlo,$Dlo
2869
2870         vpaddq          $T0,$Dhi,$Dhi
2871
2872         vpermq          \$0b10010011,$Dhi,$Dhi  # 0 in lowest qword
2873
2874         vpaddq          $Dhi,$Dlo,$Dlo          # note topmost qword :-)
2875
2876         vpsrlvq         $reduc_rght,$Dlo,$T0    # 0 in topmost word
2877         vpandq          $reduc_mask,$Dlo,$Dlo
2878
2879         vpermq          \$0b10010011,$T0,$T0
2880
2881         vpaddq          $T0,$Dlo,$Dlo
2882
2883         vpermq          \$0b10010011,$Dlo,${T0}{%k1}{z}
2884
2885         vpaddq          $T0,$Dlo,$Dlo
2886         vpsllq          \$2,$T0,$T0
2887
2888         vpaddq          $T0,$Dlo,$Dlo
2889
2890         dec             %rax                    # len-=16
2891         jnz             .Loop_vpmadd52
2892
2893         vmovdqu64       $Dlo,0($ctx){%k7}       # store hash value
2894
2895         test            $len,$len
2896         jnz             .Lblocks_vpmadd52_4x
2897
2898 .Lno_data_vpmadd52:
2899         ret
2900 .size   poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
2901 ___
2902 }
2903 {
2904 ########################################################################
2905 # As implied by its name, the 4x subroutine processes 4 blocks in parallel
2906 # (though it also handles 4*n+2 block lengths). It uses up to the 4th key
2907 # power and operates on 256-bit %ymm registers.
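#
# Conceptually, in the main loop every lane is multiplied by r^4 per
# iteration, and at the tail the lanes are multiplied by r^4, r^3, r^2 and
# r^1 respectively and summed, so that each group of four blocks contributes
#
#       h = (h + m[0])*r^4 + m[1]*r^3 + m[2]*r^2 + m[3]*r
#
# exactly as the sequential formulation would (a sketch of the usual N-way
# Poly1305 interleaving; see the horizontal addition at the tail below).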
2908
2909 my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
2910 my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
2911 my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
2912
2913 $code.=<<___;
2914 .type   poly1305_blocks_vpmadd52_4x,\@function,4
2915 .align  32
2916 poly1305_blocks_vpmadd52_4x:
2917         shr     \$4,$len
2918         jz      .Lno_data_vpmadd52_4x           # too short
2919
2920         shl     \$40,$padbit
2921         mov     64($ctx),%r8                    # peek on power of the key
2922
2923 .Lblocks_vpmadd52_4x:
2924         vpbroadcastq    $padbit,$PAD
2925
2926         vmovdqa64       .Lx_mask44(%rip),$mask44
2927         mov             \$5,%eax
2928         vmovdqa64       .Lx_mask42(%rip),$mask42
2929         kmovw           %eax,%k1                # used in 2x path
2930
2931         test            %r8,%r8                 # is power value impossible?
2932         js              .Linit_vpmadd52         # if it is, then init R[4]
2933
2934         vmovq           0($ctx),%x#$H0          # load current hash value
2935         vmovq           8($ctx),%x#$H1
2936         vmovq           16($ctx),%x#$H2
2937
2938         test            \$3,$len                # is length 4*n+2?
2939         jnz             .Lblocks_vpmadd52_2x_do
2940
2941 .Lblocks_vpmadd52_4x_do:
2942         vpbroadcastq    64($ctx),$R0            # load 4th power of the key
2943         vpbroadcastq    96($ctx),$R1
2944         vpbroadcastq    128($ctx),$R2
2945         vpbroadcastq    160($ctx),$S1
2946
2947 .Lblocks_vpmadd52_4x_key_loaded:
2948         vpsllq          \$2,$R2,$S2             # S2 = R2*5*4
2949         vpaddq          $R2,$S2,$S2
2950         vpsllq          \$2,$S2,$S2
2951
2952         test            \$7,$len                # is len 8*n?
2953         jz              .Lblocks_vpmadd52_8x
2954
2955         vmovdqu64       16*0($inp),$T2          # load data
2956         vmovdqu64       16*2($inp),$T3
2957         lea             16*4($inp),$inp
2958
2959         vpunpcklqdq     $T3,$T2,$T1             # transpose data
2960         vpunpckhqdq     $T3,$T2,$T3
2961
2962         # at this point 64-bit lanes are ordered as 3-1-2-0
2963
2964         vpsrlq          \$24,$T3,$T2            # splat the data
2965         vporq           $PAD,$T2,$T2
2966          vpaddq         $T2,$H2,$H2             # accumulate input
2967         vpandq          $mask44,$T1,$T0
2968         vpsrlq          \$44,$T1,$T1
2969         vpsllq          \$20,$T3,$T3
2970         vporq           $T3,$T1,$T1
2971         vpandq          $mask44,$T1,$T1
2972
2973         sub             \$4,$len
2974         jz              .Ltail_vpmadd52_4x
2975         jmp             .Loop_vpmadd52_4x
2976         ud2
2977
2978 .align  32
2979 .Linit_vpmadd52:
2980         vmovq           24($ctx),%x#$S1         # load key
2981         vmovq           56($ctx),%x#$H2
2982         vmovq           32($ctx),%x#$S2
2983         vmovq           40($ctx),%x#$R0
2984         vmovq           48($ctx),%x#$R1
2985
2986         vmovdqa         $R0,$H0
2987         vmovdqa         $R1,$H1
2988         vmovdqa         $H2,$R2
2989
2990         mov             \$2,%eax
2991
2992 .Lmul_init_vpmadd52:
2993         vpxorq          $D0lo,$D0lo,$D0lo
2994         vpmadd52luq     $H2,$S1,$D0lo
2995         vpxorq          $D0hi,$D0hi,$D0hi
2996         vpmadd52huq     $H2,$S1,$D0hi
2997         vpxorq          $D1lo,$D1lo,$D1lo
2998         vpmadd52luq     $H2,$S2,$D1lo
2999         vpxorq          $D1hi,$D1hi,$D1hi
3000         vpmadd52huq     $H2,$S2,$D1hi
3001         vpxorq          $D2lo,$D2lo,$D2lo
3002         vpmadd52luq     $H2,$R0,$D2lo
3003         vpxorq          $D2hi,$D2hi,$D2hi
3004         vpmadd52huq     $H2,$R0,$D2hi
3005
3006         vpmadd52luq     $H0,$R0,$D0lo
3007         vpmadd52huq     $H0,$R0,$D0hi
3008         vpmadd52luq     $H0,$R1,$D1lo
3009         vpmadd52huq     $H0,$R1,$D1hi
3010         vpmadd52luq     $H0,$R2,$D2lo
3011         vpmadd52huq     $H0,$R2,$D2hi
3012
3013         vpmadd52luq     $H1,$S2,$D0lo
3014         vpmadd52huq     $H1,$S2,$D0hi
3015         vpmadd52luq     $H1,$R0,$D1lo
3016         vpmadd52huq     $H1,$R0,$D1hi
3017         vpmadd52luq     $H1,$R1,$D2lo
3018         vpmadd52huq     $H1,$R1,$D2hi
3019
3020         ################################################################
3021         # partial reduction
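        # (each limb's accumulated sum is kept as lo + hi*2^52, which is what
        # vpmadd52luq/vpmadd52huq deliver; the carry out of a 44-bit limb is
        # therefore (lo>>44) + (hi<<8), or (lo>>42) + (hi<<10) for the 42-bit
        # top limb, and the carry out of the top limb wraps around multiplied
        # by 5, 2^130 being congruent to 5, computed below as c + 4*c)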
3022         vpsrlq          \$44,$D0lo,$tmp
3023         vpsllq          \$8,$D0hi,$D0hi
3024         vpandq          $mask44,$D0lo,$H0
3025         vpaddq          $tmp,$D0hi,$D0hi
3026
3027         vpaddq          $D0hi,$D1lo,$D1lo
3028
3029         vpsrlq          \$44,$D1lo,$tmp
3030         vpsllq          \$8,$D1hi,$D1hi
3031         vpandq          $mask44,$D1lo,$H1
3032         vpaddq          $tmp,$D1hi,$D1hi
3033
3034         vpaddq          $D1hi,$D2lo,$D2lo
3035
3036         vpsrlq          \$42,$D2lo,$tmp
3037         vpsllq          \$10,$D2hi,$D2hi
3038         vpandq          $mask42,$D2lo,$H2
3039         vpaddq          $tmp,$D2hi,$D2hi
3040
3041         vpaddq          $D2hi,$H0,$H0
3042         vpsllq          \$2,$D2hi,$D2hi
3043
3044         vpaddq          $D2hi,$H0,$H0
3045
3046         vpsrlq          \$44,$H0,$tmp           # additional step
3047         vpandq          $mask44,$H0,$H0
3048
3049         vpaddq          $tmp,$H1,$H1
3050
3051         dec             %eax
3052         jz              .Ldone_init_vpmadd52
3053
3054         vpunpcklqdq     $R1,$H1,$R1             # 1,2
3055         vpbroadcastq    %x#$H1,%x#$H1           # 2,2
3056         vpunpcklqdq     $R2,$H2,$R2
3057         vpbroadcastq    %x#$H2,%x#$H2
3058         vpunpcklqdq     $R0,$H0,$R0
3059         vpbroadcastq    %x#$H0,%x#$H0
3060
3061         vpsllq          \$2,$R1,$S1             # S1 = R1*5*4
3062         vpsllq          \$2,$R2,$S2             # S2 = R2*5*4
3063         vpaddq          $R1,$S1,$S1
3064         vpaddq          $R2,$S2,$S2
3065         vpsllq          \$2,$S1,$S1
3066         vpsllq          \$2,$S2,$S2
3067
3068         jmp             .Lmul_init_vpmadd52
3069         ud2
3070
3071 .align  32
3072 .Ldone_init_vpmadd52:
3073         vinserti128     \$1,%x#$R1,$H1,$R1      # 1,2,3,4
3074         vinserti128     \$1,%x#$R2,$H2,$R2
3075         vinserti128     \$1,%x#$R0,$H0,$R0
3076
3077         vpermq          \$0b11011000,$R1,$R1    # 1,3,2,4
3078         vpermq          \$0b11011000,$R2,$R2
3079         vpermq          \$0b11011000,$R0,$R0
3080
3081         vpsllq          \$2,$R1,$S1             # S1 = R1*5*4
3082         vpaddq          $R1,$S1,$S1
3083         vpsllq          \$2,$S1,$S1
3084
3085         vmovq           0($ctx),%x#$H0          # load current hash value
3086         vmovq           8($ctx),%x#$H1
3087         vmovq           16($ctx),%x#$H2
3088
3089         test            \$3,$len                # is length 4*n+2?
3090         jnz             .Ldone_init_vpmadd52_2x
3091
3092         vmovdqu64       $R0,64($ctx)            # save key powers
3093         vpbroadcastq    %x#$R0,$R0              # broadcast 4th power
3094         vmovdqu64       $R1,96($ctx)
3095         vpbroadcastq    %x#$R1,$R1
3096         vmovdqu64       $R2,128($ctx)
3097         vpbroadcastq    %x#$R2,$R2
3098         vmovdqu64       $S1,160($ctx)
3099         vpbroadcastq    %x#$S1,$S1
3100
3101         jmp             .Lblocks_vpmadd52_4x_key_loaded
3102         ud2
3103
3104 .align  32
3105 .Ldone_init_vpmadd52_2x:
3106         vmovdqu64       $R0,64($ctx)            # save key powers
3107         vpsrldq         \$8,$R0,$R0             # 0-1-0-2
3108         vmovdqu64       $R1,96($ctx)
3109         vpsrldq         \$8,$R1,$R1
3110         vmovdqu64       $R2,128($ctx)
3111         vpsrldq         \$8,$R2,$R2
3112         vmovdqu64       $S1,160($ctx)
3113         vpsrldq         \$8,$S1,$S1
3114         jmp             .Lblocks_vpmadd52_2x_key_loaded
3115         ud2
3116
3117 .align  32
3118 .Lblocks_vpmadd52_2x_do:
3119         vmovdqu64       128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers
3120         vmovdqu64       160+8($ctx),${S1}{%k1}{z}
3121         vmovdqu64       64+8($ctx),${R0}{%k1}{z}
3122         vmovdqu64       96+8($ctx),${R1}{%k1}{z}
3123
3124 .Lblocks_vpmadd52_2x_key_loaded:
3125         vmovdqu64       16*0($inp),$T2          # load data
3126         vpxorq          $T3,$T3,$T3
3127         lea             16*2($inp),$inp
3128
3129         vpunpcklqdq     $T3,$T2,$T1             # transpose data
3130         vpunpckhqdq     $T3,$T2,$T3
3131
3132         # at this point 64-bit lanes are ordered as x-1-x-0
3133
3134         vpsrlq          \$24,$T3,$T2            # splat the data
3135         vporq           $PAD,$T2,$T2
3136          vpaddq         $T2,$H2,$H2             # accumulate input
3137         vpandq          $mask44,$T1,$T0
3138         vpsrlq          \$44,$T1,$T1
3139         vpsllq          \$20,$T3,$T3
3140         vporq           $T3,$T1,$T1
3141         vpandq          $mask44,$T1,$T1
3142
3143         jmp             .Ltail_vpmadd52_2x
3144         ud2
3145
3146 .align  32
3147 .Loop_vpmadd52_4x:
3148         #vpaddq         $T2,$H2,$H2             # accumulate input
3149         vpaddq          $T0,$H0,$H0
3150         vpaddq          $T1,$H1,$H1
3151
3152         vpxorq          $D0lo,$D0lo,$D0lo
3153         vpmadd52luq     $H2,$S1,$D0lo
3154         vpxorq          $D0hi,$D0hi,$D0hi
3155         vpmadd52huq     $H2,$S1,$D0hi
3156         vpxorq          $D1lo,$D1lo,$D1lo
3157         vpmadd52luq     $H2,$S2,$D1lo
3158         vpxorq          $D1hi,$D1hi,$D1hi
3159         vpmadd52huq     $H2,$S2,$D1hi
3160         vpxorq          $D2lo,$D2lo,$D2lo
3161         vpmadd52luq     $H2,$R0,$D2lo
3162         vpxorq          $D2hi,$D2hi,$D2hi
3163         vpmadd52huq     $H2,$R0,$D2hi
3164
3165          vmovdqu64      16*0($inp),$T2          # load data
3166          vmovdqu64      16*2($inp),$T3
3167          lea            16*4($inp),$inp
3168         vpmadd52luq     $H0,$R0,$D0lo
3169         vpmadd52huq     $H0,$R0,$D0hi
3170         vpmadd52luq     $H0,$R1,$D1lo
3171         vpmadd52huq     $H0,$R1,$D1hi
3172         vpmadd52luq     $H0,$R2,$D2lo
3173         vpmadd52huq     $H0,$R2,$D2hi
3174
3175          vpunpcklqdq    $T3,$T2,$T1             # transpose data
3176          vpunpckhqdq    $T3,$T2,$T3
3177         vpmadd52luq     $H1,$S2,$D0lo
3178         vpmadd52huq     $H1,$S2,$D0hi
3179         vpmadd52luq     $H1,$R0,$D1lo
3180         vpmadd52huq     $H1,$R0,$D1hi
3181         vpmadd52luq     $H1,$R1,$D2lo
3182         vpmadd52huq     $H1,$R1,$D2hi
3183
3184         ################################################################
3185         # partial reduction (interleaved with data splat)
3186         vpsrlq          \$44,$D0lo,$tmp
3187         vpsllq          \$8,$D0hi,$D0hi
3188         vpandq          $mask44,$D0lo,$H0
3189         vpaddq          $tmp,$D0hi,$D0hi
3190
3191          vpsrlq         \$24,$T3,$T2
3192          vporq          $PAD,$T2,$T2
3193         vpaddq          $D0hi,$D1lo,$D1lo
3194
3195         vpsrlq          \$44,$D1lo,$tmp
3196         vpsllq          \$8,$D1hi,$D1hi
3197         vpandq          $mask44,$D1lo,$H1
3198         vpaddq          $tmp,$D1hi,$D1hi
3199
3200          vpandq         $mask44,$T1,$T0
3201          vpsrlq         \$44,$T1,$T1
3202          vpsllq         \$20,$T3,$T3
3203         vpaddq          $D1hi,$D2lo,$D2lo
3204
3205         vpsrlq          \$42,$D2lo,$tmp
3206         vpsllq          \$10,$D2hi,$D2hi
3207         vpandq          $mask42,$D2lo,$H2
3208         vpaddq          $tmp,$D2hi,$D2hi
3209
3210           vpaddq        $T2,$H2,$H2             # accumulate input
3211         vpaddq          $D2hi,$H0,$H0
3212         vpsllq          \$2,$D2hi,$D2hi
3213
3214         vpaddq          $D2hi,$H0,$H0
3215          vporq          $T3,$T1,$T1
3216          vpandq         $mask44,$T1,$T1
3217
3218         vpsrlq          \$44,$H0,$tmp           # additional step
3219         vpandq          $mask44,$H0,$H0
3220
3221         vpaddq          $tmp,$H1,$H1
3222
3223         sub             \$4,$len                # len-=64
3224         jnz             .Loop_vpmadd52_4x
3225
3226 .Ltail_vpmadd52_4x:
3227         vmovdqu64       128($ctx),$R2           # load all key powers
3228         vmovdqu64       160($ctx),$S1
3229         vmovdqu64       64($ctx),$R0
3230         vmovdqu64       96($ctx),$R1
3231
3232 .Ltail_vpmadd52_2x:
3233         vpsllq          \$2,$R2,$S2             # S2 = R2*5*4
3234         vpaddq          $R2,$S2,$S2
3235         vpsllq          \$2,$S2,$S2
3236
3237         #vpaddq         $T2,$H2,$H2             # accumulate input
3238         vpaddq          $T0,$H0,$H0
3239         vpaddq          $T1,$H1,$H1
3240
3241         vpxorq          $D0lo,$D0lo,$D0lo
3242         vpmadd52luq     $H2,$S1,$D0lo
3243         vpxorq          $D0hi,$D0hi,$D0hi
3244         vpmadd52huq     $H2,$S1,$D0hi
3245         vpxorq          $D1lo,$D1lo,$D1lo
3246         vpmadd52luq     $H2,$S2,$D1lo
3247         vpxorq          $D1hi,$D1hi,$D1hi
3248         vpmadd52huq     $H2,$S2,$D1hi
3249         vpxorq          $D2lo,$D2lo,$D2lo
3250         vpmadd52luq     $H2,$R0,$D2lo
3251         vpxorq          $D2hi,$D2hi,$D2hi
3252         vpmadd52huq     $H2,$R0,$D2hi
3253
3254         vpmadd52luq     $H0,$R0,$D0lo
3255         vpmadd52huq     $H0,$R0,$D0hi
3256         vpmadd52luq     $H0,$R1,$D1lo
3257         vpmadd52huq     $H0,$R1,$D1hi
3258         vpmadd52luq     $H0,$R2,$D2lo
3259         vpmadd52huq     $H0,$R2,$D2hi
3260
3261         vpmadd52luq     $H1,$S2,$D0lo
3262         vpmadd52huq     $H1,$S2,$D0hi
3263         vpmadd52luq     $H1,$R0,$D1lo
3264         vpmadd52huq     $H1,$R0,$D1hi
3265         vpmadd52luq     $H1,$R1,$D2lo
3266         vpmadd52huq     $H1,$R1,$D2hi
3267
3268         ################################################################
3269         # horizontal addition
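        # (the four 64-bit lanes of each accumulator are folded into lane 0,
        # masked by %k1, before the final reduction below)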
3270
3271         mov             \$1,%eax
3272         kmovw           %eax,%k1
3273         vpsrldq         \$8,$D0lo,$T0
3274         vpsrldq         \$8,$D0hi,$H0
3275         vpsrldq         \$8,$D1lo,$T1
3276         vpsrldq         \$8,$D1hi,$H1
3277         vpaddq          $T0,$D0lo,$D0lo
3278         vpaddq          $H0,$D0hi,$D0hi
3279         vpsrldq         \$8,$D2lo,$T2
3280         vpsrldq         \$8,$D2hi,$H2
3281         vpaddq          $T1,$D1lo,$D1lo
3282         vpaddq          $H1,$D1hi,$D1hi
3283          vpermq         \$0x2,$D0lo,$T0
3284          vpermq         \$0x2,$D0hi,$H0
3285         vpaddq          $T2,$D2lo,$D2lo
3286         vpaddq          $H2,$D2hi,$D2hi
3287
3288         vpermq          \$0x2,$D1lo,$T1
3289         vpermq          \$0x2,$D1hi,$H1
3290         vpaddq          $T0,$D0lo,${D0lo}{%k1}{z}
3291         vpaddq          $H0,$D0hi,${D0hi}{%k1}{z}
3292         vpermq          \$0x2,$D2lo,$T2
3293         vpermq          \$0x2,$D2hi,$H2
3294         vpaddq          $T1,$D1lo,${D1lo}{%k1}{z}
3295         vpaddq          $H1,$D1hi,${D1hi}{%k1}{z}
3296         vpaddq          $T2,$D2lo,${D2lo}{%k1}{z}
3297         vpaddq          $H2,$D2hi,${D2hi}{%k1}{z}
3298
3299         ################################################################
3300         # partial reduction
3301         vpsrlq          \$44,$D0lo,$tmp
3302         vpsllq          \$8,$D0hi,$D0hi
3303         vpandq          $mask44,$D0lo,$H0
3304         vpaddq          $tmp,$D0hi,$D0hi
3305
3306         vpaddq          $D0hi,$D1lo,$D1lo
3307
3308         vpsrlq          \$44,$D1lo,$tmp
3309         vpsllq          \$8,$D1hi,$D1hi
3310         vpandq          $mask44,$D1lo,$H1
3311         vpaddq          $tmp,$D1hi,$D1hi
3312
3313         vpaddq          $D1hi,$D2lo,$D2lo
3314
3315         vpsrlq          \$42,$D2lo,$tmp
3316         vpsllq          \$10,$D2hi,$D2hi
3317         vpandq          $mask42,$D2lo,$H2
3318         vpaddq          $tmp,$D2hi,$D2hi
3319
3320         vpaddq          $D2hi,$H0,$H0
3321         vpsllq          \$2,$D2hi,$D2hi
3322
3323         vpaddq          $D2hi,$H0,$H0
3324
3325         vpsrlq          \$44,$H0,$tmp           # additional step
3326         vpandq          $mask44,$H0,$H0
3327
3328         vpaddq          $tmp,$H1,$H1
3329                                                 # at this point $len is
3330                                                 # either 4*n+2 or 0...
3331         sub             \$2,$len                # len-=32
3332         ja              .Lblocks_vpmadd52_4x_do
3333
3334         vmovq           %x#$H0,0($ctx)
3335         vmovq           %x#$H1,8($ctx)
3336         vmovq           %x#$H2,16($ctx)
3337         vzeroall
3338
3339 .Lno_data_vpmadd52_4x:
3340         ret
3341 .size   poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
3342 ___
3343 }
3344 {
3345 ########################################################################
3346 # As implied by its name, the 8x subroutine processes 8 blocks in parallel...
3347 # This is an intermediate version, as it's used only in cases when the input
3348 # length is either 8*n, 8*n+1 or 8*n+2...
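#
# Only the powers r^1..r^4 are cached in the context; r^5..r^8 are derived
# on entry by multiplying the cached set by a broadcast r^4 (so r^5 = r^1*r^4,
# ..., r^8 = r^4*r^4), and the two sets are then interleaved to match the
# order in which the eight blocks are loaded.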
3349
3350 my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
3351 my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
3352 my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
3353 my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10));
3354
3355 $code.=<<___;
3356 .type   poly1305_blocks_vpmadd52_8x,\@function,4
3357 .align  32
3358 poly1305_blocks_vpmadd52_8x:
3359         shr     \$4,$len
3360         jz      .Lno_data_vpmadd52_8x           # too short
3361
3362         shl     \$40,$padbit
3363         mov     64($ctx),%r8                    # peek on power of the key
3364
3365         vmovdqa64       .Lx_mask44(%rip),$mask44
3366         vmovdqa64       .Lx_mask42(%rip),$mask42
3367
3368         test    %r8,%r8                         # is power value impossible?
3369         js      .Linit_vpmadd52                 # if it is, then init R[4]
3370
3371         vmovq   0($ctx),%x#$H0                  # load current hash value
3372         vmovq   8($ctx),%x#$H1
3373         vmovq   16($ctx),%x#$H2
3374
3375 .Lblocks_vpmadd52_8x:
3376         ################################################################
3377         # first we calculate more key powers
3378
3379         vmovdqu64       128($ctx),$R2           # load 1-3-2-4 powers
3380         vmovdqu64       160($ctx),$S1
3381         vmovdqu64       64($ctx),$R0
3382         vmovdqu64       96($ctx),$R1
3383
3384         vpsllq          \$2,$R2,$S2             # S2 = R2*5*4
3385         vpaddq          $R2,$S2,$S2
3386         vpsllq          \$2,$S2,$S2
3387
3388         vpbroadcastq    %x#$R2,$RR2             # broadcast 4th power
3389         vpbroadcastq    %x#$R0,$RR0
3390         vpbroadcastq    %x#$R1,$RR1
3391
3392         vpxorq          $D0lo,$D0lo,$D0lo
3393         vpmadd52luq     $RR2,$S1,$D0lo
3394         vpxorq          $D0hi,$D0hi,$D0hi
3395         vpmadd52huq     $RR2,$S1,$D0hi
3396         vpxorq          $D1lo,$D1lo,$D1lo
3397         vpmadd52luq     $RR2,$S2,$D1lo
3398         vpxorq          $D1hi,$D1hi,$D1hi
3399         vpmadd52huq     $RR2,$S2,$D1hi
3400         vpxorq          $D2lo,$D2lo,$D2lo
3401         vpmadd52luq     $RR2,$R0,$D2lo
3402         vpxorq          $D2hi,$D2hi,$D2hi
3403         vpmadd52huq     $RR2,$R0,$D2hi
3404
3405         vpmadd52luq     $RR0,$R0,$D0lo
3406         vpmadd52huq     $RR0,$R0,$D0hi
3407         vpmadd52luq     $RR0,$R1,$D1lo
3408         vpmadd52huq     $RR0,$R1,$D1hi
3409         vpmadd52luq     $RR0,$R2,$D2lo
3410         vpmadd52huq     $RR0,$R2,$D2hi
3411
3412         vpmadd52luq     $RR1,$S2,$D0lo
3413         vpmadd52huq     $RR1,$S2,$D0hi
3414         vpmadd52luq     $RR1,$R0,$D1lo
3415         vpmadd52huq     $RR1,$R0,$D1hi
3416         vpmadd52luq     $RR1,$R1,$D2lo
3417         vpmadd52huq     $RR1,$R1,$D2hi
3418
3419         ################################################################
3420         # partial reduction
3421         vpsrlq          \$44,$D0lo,$tmp
3422         vpsllq          \$8,$D0hi,$D0hi
3423         vpandq          $mask44,$D0lo,$RR0
3424         vpaddq          $tmp,$D0hi,$D0hi
3425
3426         vpaddq          $D0hi,$D1lo,$D1lo
3427
3428         vpsrlq          \$44,$D1lo,$tmp
3429         vpsllq          \$8,$D1hi,$D1hi
3430         vpandq          $mask44,$D1lo,$RR1
3431         vpaddq          $tmp,$D1hi,$D1hi
3432
3433         vpaddq          $D1hi,$D2lo,$D2lo
3434
3435         vpsrlq          \$42,$D2lo,$tmp
3436         vpsllq          \$10,$D2hi,$D2hi
3437         vpandq          $mask42,$D2lo,$RR2
3438         vpaddq          $tmp,$D2hi,$D2hi
3439
3440         vpaddq          $D2hi,$RR0,$RR0
3441         vpsllq          \$2,$D2hi,$D2hi
3442
3443         vpaddq          $D2hi,$RR0,$RR0
3444
3445         vpsrlq          \$44,$RR0,$tmp          # additional step
3446         vpandq          $mask44,$RR0,$RR0
3447
3448         vpaddq          $tmp,$RR1,$RR1
3449
3450         ################################################################
3451         # At this point Rx holds the 1324 powers and RRx the 5768 ones; the
3452         # goal is the 15263748 order, which reflects how data is loaded...
3453
3454         vpunpcklqdq     $R2,$RR2,$T2            # 3748
3455         vpunpckhqdq     $R2,$RR2,$R2            # 1526
3456         vpunpcklqdq     $R0,$RR0,$T0
3457         vpunpckhqdq     $R0,$RR0,$R0
3458         vpunpcklqdq     $R1,$RR1,$T1
3459         vpunpckhqdq     $R1,$RR1,$R1
3460 ___
3461 ######## switch to %zmm
3462 map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
3463 map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
3464 map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
3465 map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2);
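# (the map()s above only rename the Perl-level register variables from
# %ymmN to %zmmN, so the code emitted from here on operates on full
# 512-bit registers)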
3466
3467 $code.=<<___;
3468         vshufi64x2      \$0x44,$R2,$T2,$RR2     # 15263748
3469         vshufi64x2      \$0x44,$R0,$T0,$RR0
3470         vshufi64x2      \$0x44,$R1,$T1,$RR1
3471
3472         vmovdqu64       16*0($inp),$T2          # load data
3473         vmovdqu64       16*4($inp),$T3
3474         lea             16*8($inp),$inp
3475
3476         vpsllq          \$2,$RR2,$SS2           # S2 = R2*5*4
3477         vpsllq          \$2,$RR1,$SS1           # S1 = R1*5*4
3478         vpaddq          $RR2,$SS2,$SS2
3479         vpaddq          $RR1,$SS1,$SS1
3480         vpsllq          \$2,$SS2,$SS2
3481         vpsllq          \$2,$SS1,$SS1
3482
3483         vpbroadcastq    $padbit,$PAD
3484         vpbroadcastq    %x#$mask44,$mask44
3485         vpbroadcastq    %x#$mask42,$mask42
3486
3487         vpbroadcastq    %x#$SS1,$S1             # broadcast 8th power
3488         vpbroadcastq    %x#$SS2,$S2
3489         vpbroadcastq    %x#$RR0,$R0
3490         vpbroadcastq    %x#$RR1,$R1
3491         vpbroadcastq    %x#$RR2,$R2
3492
3493         vpunpcklqdq     $T3,$T2,$T1             # transpose data
3494         vpunpckhqdq     $T3,$T2,$T3
3495
3496         # at this point 64-bit lanes are ordered as 73625140
3497
3498         vpsrlq          \$24,$T3,$T2            # splat the data
3499         vporq           $PAD,$T2,$T2
3500          vpaddq         $T2,$H2,$H2             # accumulate input
3501         vpandq          $mask44,$T1,$T0
3502         vpsrlq          \$44,$T1,$T1
3503         vpsllq          \$20,$T3,$T3
3504         vporq           $T3,$T1,$T1
3505         vpandq          $mask44,$T1,$T1
3506
3507         sub             \$8,$len
3508         jz              .Ltail_vpmadd52_8x
3509         jmp             .Loop_vpmadd52_8x
3510
3511 .align  32
3512 .Loop_vpmadd52_8x:
3513         #vpaddq         $T2,$H2,$H2             # accumulate input
3514         vpaddq          $T0,$H0,$H0
3515         vpaddq          $T1,$H1,$H1
3516
3517         vpxorq          $D0lo,$D0lo,$D0lo
3518         vpmadd52luq     $H2,$S1,$D0lo
3519         vpxorq          $D0hi,$D0hi,$D0hi
3520         vpmadd52huq     $H2,$S1,$D0hi
3521         vpxorq          $D1lo,$D1lo,$D1lo
3522         vpmadd52luq     $H2,$S2,$D1lo
3523         vpxorq          $D1hi,$D1hi,$D1hi
3524         vpmadd52huq     $H2,$S2,$D1hi
3525         vpxorq          $D2lo,$D2lo,$D2lo
3526         vpmadd52luq     $H2,$R0,$D2lo
3527         vpxorq          $D2hi,$D2hi,$D2hi
3528         vpmadd52huq     $H2,$R0,$D2hi
3529
3530          vmovdqu64      16*0($inp),$T2          # load data
3531          vmovdqu64      16*4($inp),$T3
3532          lea            16*8($inp),$inp
3533         vpmadd52luq     $H0,$R0,$D0lo
3534         vpmadd52huq     $H0,$R0,$D0hi
3535         vpmadd52luq     $H0,$R1,$D1lo
3536         vpmadd52huq     $H0,$R1,$D1hi
3537         vpmadd52luq     $H0,$R2,$D2lo
3538         vpmadd52huq     $H0,$R2,$D2hi
3539
3540          vpunpcklqdq    $T3,$T2,$T1             # transpose data
3541          vpunpckhqdq    $T3,$T2,$T3
3542         vpmadd52luq     $H1,$S2,$D0lo
3543         vpmadd52huq     $H1,$S2,$D0hi
3544         vpmadd52luq     $H1,$R0,$D1lo
3545         vpmadd52huq     $H1,$R0,$D1hi
3546         vpmadd52luq     $H1,$R1,$D2lo
3547         vpmadd52huq     $H1,$R1,$D2hi
3548
3549         ################################################################
3550         # partial reduction (interleaved with data splat)
3551         vpsrlq          \$44,$D0lo,$tmp
3552         vpsllq          \$8,$D0hi,$D0hi
3553         vpandq          $mask44,$D0lo,$H0
3554         vpaddq          $tmp,$D0hi,$D0hi
3555
3556          vpsrlq         \$24,$T3,$T2
3557          vporq          $PAD,$T2,$T2
3558         vpaddq          $D0hi,$D1lo,$D1lo
3559
3560         vpsrlq          \$44,$D1lo,$tmp
3561         vpsllq          \$8,$D1hi,$D1hi
3562         vpandq          $mask44,$D1lo,$H1
3563         vpaddq          $tmp,$D1hi,$D1hi
3564
3565          vpandq         $mask44,$T1,$T0
3566          vpsrlq         \$44,$T1,$T1
3567          vpsllq         \$20,$T3,$T3
3568         vpaddq          $D1hi,$D2lo,$D2lo
3569
3570         vpsrlq          \$42,$D2lo,$tmp
3571         vpsllq          \$10,$D2hi,$D2hi
3572         vpandq          $mask42,$D2lo,$H2
3573         vpaddq          $tmp,$D2hi,$D2hi
3574
3575           vpaddq        $T2,$H2,$H2             # accumulate input
3576         vpaddq          $D2hi,$H0,$H0
3577         vpsllq          \$2,$D2hi,$D2hi
3578
3579         vpaddq          $D2hi,$H0,$H0
3580          vporq          $T3,$T1,$T1
3581          vpandq         $mask44,$T1,$T1
3582
3583         vpsrlq          \$44,$H0,$tmp           # additional step
3584         vpandq          $mask44,$H0,$H0
3585
3586         vpaddq          $tmp,$H1,$H1
3587
3588         sub             \$8,$len                # len-=128
3589         jnz             .Loop_vpmadd52_8x
3590
3591 .Ltail_vpmadd52_8x:
3592         #vpaddq         $T2,$H2,$H2             # accumulate input
3593         vpaddq          $T0,$H0,$H0
3594         vpaddq          $T1,$H1,$H1
3595
3596         vpxorq          $D0lo,$D0lo,$D0lo
3597         vpmadd52luq     $H2,$SS1,$D0lo
3598         vpxorq          $D0hi,$D0hi,$D0hi
3599         vpmadd52huq     $H2,$SS1,$D0hi
3600         vpxorq          $D1lo,$D1lo,$D1lo
3601         vpmadd52luq     $H2,$SS2,$D1lo
3602         vpxorq          $D1hi,$D1hi,$D1hi
3603         vpmadd52huq     $H2,$SS2,$D1hi
3604         vpxorq          $D2lo,$D2lo,$D2lo
3605         vpmadd52luq     $H2,$RR0,$D2lo
3606         vpxorq          $D2hi,$D2hi,$D2hi
3607         vpmadd52huq     $H2,$RR0,$D2hi
3608
3609         vpmadd52luq     $H0,$RR0,$D0lo
3610         vpmadd52huq     $H0,$RR0,$D0hi
3611         vpmadd52luq     $H0,$RR1,$D1lo
3612         vpmadd52huq     $H0,$RR1,$D1hi
3613         vpmadd52luq     $H0,$RR2,$D2lo
3614         vpmadd52huq     $H0,$RR2,$D2hi
3615
3616         vpmadd52luq     $H1,$SS2,$D0lo
3617         vpmadd52huq     $H1,$SS2,$D0hi
3618         vpmadd52luq     $H1,$RR0,$D1lo
3619         vpmadd52huq     $H1,$RR0,$D1hi
3620         vpmadd52luq     $H1,$RR1,$D2lo
3621         vpmadd52huq     $H1,$RR1,$D2hi
3622
3623         ################################################################
3624         # horizontal addition
3625
3626         mov             \$1,%eax
3627         kmovw           %eax,%k1
3628         vpsrldq         \$8,$D0lo,$T0
3629         vpsrldq         \$8,$D0hi,$H0
3630         vpsrldq         \$8,$D1lo,$T1
3631         vpsrldq         \$8,$D1hi,$H1
3632         vpaddq          $T0,$D0lo,$D0lo
3633         vpaddq          $H0,$D0hi,$D0hi
3634         vpsrldq         \$8,$D2lo,$T2
3635         vpsrldq         \$8,$D2hi,$H2
3636         vpaddq          $T1,$D1lo,$D1lo
3637         vpaddq          $H1,$D1hi,$D1hi
3638          vpermq         \$0x2,$D0lo,$T0
3639          vpermq         \$0x2,$D0hi,$H0
3640         vpaddq          $T2,$D2lo,$D2lo
3641         vpaddq          $H2,$D2hi,$D2hi
3642
3643         vpermq          \$0x2,$D1lo,$T1
3644         vpermq          \$0x2,$D1hi,$H1
3645         vpaddq          $T0,$D0lo,$D0lo
3646         vpaddq          $H0,$D0hi,$D0hi
3647         vpermq          \$0x2,$D2lo,$T2
3648         vpermq          \$0x2,$D2hi,$H2
3649         vpaddq          $T1,$D1lo,$D1lo
3650         vpaddq          $H1,$D1hi,$D1hi
3651          vextracti64x4  \$1,$D0lo,%y#$T0
3652          vextracti64x4  \$1,$D0hi,%y#$H0
3653         vpaddq          $T2,$D2lo,$D2lo
3654         vpaddq          $H2,$D2hi,$D2hi
3655
3656         vextracti64x4   \$1,$D1lo,%y#$T1
3657         vextracti64x4   \$1,$D1hi,%y#$H1
3658         vextracti64x4   \$1,$D2lo,%y#$T2
3659         vextracti64x4   \$1,$D2hi,%y#$H2
3660 ___
3661 ######## switch back to %ymm
3662 map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
3663 map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
3664 map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
3665
3666 $code.=<<___;
3667         vpaddq          $T0,$D0lo,${D0lo}{%k1}{z}
3668         vpaddq          $H0,$D0hi,${D0hi}{%k1}{z}
3669         vpaddq          $T1,$D1lo,${D1lo}{%k1}{z}
3670         vpaddq          $H1,$D1hi,${D1hi}{%k1}{z}
3671         vpaddq          $T2,$D2lo,${D2lo}{%k1}{z}
3672         vpaddq          $H2,$D2hi,${D2hi}{%k1}{z}
3673
3674         ################################################################
3675         # partial reduction
3676         vpsrlq          \$44,$D0lo,$tmp
3677         vpsllq          \$8,$D0hi,$D0hi
3678         vpandq          $mask44,$D0lo,$H0
3679         vpaddq          $tmp,$D0hi,$D0hi
3680
3681         vpaddq          $D0hi,$D1lo,$D1lo
3682
3683         vpsrlq          \$44,$D1lo,$tmp
3684         vpsllq          \$8,$D1hi,$D1hi
3685         vpandq          $mask44,$D1lo,$H1
3686         vpaddq          $tmp,$D1hi,$D1hi
3687
3688         vpaddq          $D1hi,$D2lo,$D2lo
3689
3690         vpsrlq          \$42,$D2lo,$tmp
3691         vpsllq          \$10,$D2hi,$D2hi
3692         vpandq          $mask42,$D2lo,$H2
3693         vpaddq          $tmp,$D2hi,$D2hi
3694
3695         vpaddq          $D2hi,$H0,$H0
3696         vpsllq          \$2,$D2hi,$D2hi
3697
3698         vpaddq          $D2hi,$H0,$H0
3699
3700         vpsrlq          \$44,$H0,$tmp           # additional step
3701         vpandq          $mask44,$H0,$H0
3702
3703         vpaddq          $tmp,$H1,$H1
3704
3705         ################################################################
3706
3707         vmovq           %x#$H0,0($ctx)
3708         vmovq           %x#$H1,8($ctx)
3709         vmovq           %x#$H2,16($ctx)
3710         vzeroall
3711
3712 .Lno_data_vpmadd52_8x:
3713         ret
3714 .size   poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
3715 ___
3716 }
3717 $code.=<<___;
3718 .type   poly1305_emit_base2_44,\@function,3
3719 .align  32
3720 poly1305_emit_base2_44:
3721         mov     0($ctx),%r8     # load hash value
3722         mov     8($ctx),%r9
3723         mov     16($ctx),%r10
3724
3725         mov     %r9,%rax
3726         shr     \$20,%r9
3727         shl     \$44,%rax
3728         mov     %r10,%rcx
3729         shr     \$40,%r10
3730         shl     \$24,%rcx
3731
3732         add     %rax,%r8
3733         adc     %rcx,%r9
3734         adc     \$0,%r10
3735
3736         mov     %r8,%rax
3737         add     \$5,%r8         # compare to modulus
3738         mov     %r9,%rcx
3739         adc     \$0,%r9
3740         adc     \$0,%r10
3741         shr     \$2,%r10        # did 130-bit value overflow?
3742         cmovnz  %r8,%rax
3743         cmovnz  %r9,%rcx
3744
3745         add     0($nonce),%rax  # accumulate nonce
3746         adc     8($nonce),%rcx
3747         mov     %rax,0($mac)    # write result
3748         mov     %rcx,8($mac)
3749
3750         ret
3751 .size   poly1305_emit_base2_44,.-poly1305_emit_base2_44
3752 ___
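#
# For reference, poly1305_emit_base2_44 above amounts to roughly the
# following (an illustrative sketch in the spirit of the comments above,
# not generated code):
#
#       h = h[0] + (h[1]<<44) + (h[2]<<88);     # collapse base 2^44 limbs
#       if ((h + 5)>>130)                       # h >= 2^130-5 ?
#               h += 5;                         # i.e. h -= 2^130-5 mod 2^128
#       tag = (h + nonce) mod 2^128;            # stored little-endian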
3753 }       }       }
3754 $code.=<<___;
3755 .align  64
3756 .Lconst:
3757 .Lmask24:
3758 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
3759 .L129:
3760 .long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
3761 .Lmask26:
3762 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
3763 .Lpermd_avx2:
3764 .long   2,2,2,3,2,0,2,1
3765 .Lpermd_avx512:
3766 .long   0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7
3767
3768 .L2_44_inp_permd:
3769 .long   0,1,1,2,2,3,7,7
3770 .L2_44_inp_shift:
3771 .quad   0,12,24,64
3772 .L2_44_mask:
3773 .quad   0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
3774 .L2_44_shift_rgt:
3775 .quad   44,44,42,64
3776 .L2_44_shift_lft:
3777 .quad   8,8,10,64
3778
3779 .align  64
3780 .Lx_mask44:
3781 .quad   0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
3782 .quad   0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
3783 .Lx_mask42:
3784 .quad   0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
3785 .quad   0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
3786 ___
3787 }
3788
3789 $code.=<<___;
3790 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3791 .align  16
3792 ___
3793
3794 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3795 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
3796 if ($win64) {
3797 $rec="%rcx";
3798 $frame="%rdx";
3799 $context="%r8";
3800 $disp="%r9";
3801
3802 $code.=<<___;
3803 .extern __imp_RtlVirtualUnwind
3804 .type   se_handler,\@abi-omnipotent
3805 .align  16
3806 se_handler:
3807         push    %rsi
3808         push    %rdi
3809         push    %rbx
3810         push    %rbp
3811         push    %r12
3812         push    %r13
3813         push    %r14
3814         push    %r15
3815         pushfq
3816         sub     \$64,%rsp
3817
3818         mov     120($context),%rax      # pull context->Rax
3819         mov     248($context),%rbx      # pull context->Rip
3820
3821         mov     8($disp),%rsi           # disp->ImageBase
3822         mov     56($disp),%r11          # disp->HandlerData
3823
3824         mov     0(%r11),%r10d           # HandlerData[0]
3825         lea     (%rsi,%r10),%r10        # prologue label
3826         cmp     %r10,%rbx               # context->Rip<.Lprologue
3827         jb      .Lcommon_seh_tail
3828
3829         mov     152($context),%rax      # pull context->Rsp
3830
3831         mov     4(%r11),%r10d           # HandlerData[1]
3832         lea     (%rsi,%r10),%r10        # epilogue label
3833         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
3834         jae     .Lcommon_seh_tail
3835
3836         lea     48(%rax),%rax
3837
3838         mov     -8(%rax),%rbx
3839         mov     -16(%rax),%rbp
3840         mov     -24(%rax),%r12
3841         mov     -32(%rax),%r13
3842         mov     -40(%rax),%r14
3843         mov     -48(%rax),%r15
3844         mov     %rbx,144($context)      # restore context->Rbx
3845         mov     %rbp,160($context)      # restore context->Rbp
3846         mov     %r12,216($context)      # restore context->R12
3847         mov     %r13,224($context)      # restore context->R13
3848         mov     %r14,232($context)      # restore context->R14
3849         mov     %r15,240($context)      # restore context->R15
3850
3851         jmp     .Lcommon_seh_tail
3852 .size   se_handler,.-se_handler
3853
3854 .type   avx_handler,\@abi-omnipotent
3855 .align  16
3856 avx_handler:
3857         push    %rsi
3858         push    %rdi
3859         push    %rbx
3860         push    %rbp
3861         push    %r12
3862         push    %r13
3863         push    %r14
3864         push    %r15
3865         pushfq
3866         sub     \$64,%rsp
3867
3868         mov     120($context),%rax      # pull context->Rax
3869         mov     248($context),%rbx      # pull context->Rip
3870
3871         mov     8($disp),%rsi           # disp->ImageBase
3872         mov     56($disp),%r11          # disp->HandlerData
3873
3874         mov     0(%r11),%r10d           # HandlerData[0]
3875         lea     (%rsi,%r10),%r10        # prologue label
3876         cmp     %r10,%rbx               # context->Rip<prologue label
3877         jb      .Lcommon_seh_tail
3878
3879         mov     152($context),%rax      # pull context->Rsp
3880
3881         mov     4(%r11),%r10d           # HandlerData[1]
3882         lea     (%rsi,%r10),%r10        # epilogue label
3883         cmp     %r10,%rbx               # context->Rip>=epilogue label
3884         jae     .Lcommon_seh_tail
3885
3886         mov     208($context),%rax      # pull context->R11
3887
3888         lea     0x50(%rax),%rsi
3889         lea     0xf8(%rax),%rax
3890         lea     512($context),%rdi      # &context.Xmm6
3891         mov     \$20,%ecx
3892         .long   0xa548f3fc              # cld; rep movsq
3893
3894 .Lcommon_seh_tail:
3895         mov     8(%rax),%rdi
3896         mov     16(%rax),%rsi
3897         mov     %rax,152($context)      # restore context->Rsp
3898         mov     %rsi,168($context)      # restore context->Rsi
3899         mov     %rdi,176($context)      # restore context->Rdi
3900
3901         mov     40($disp),%rdi          # disp->ContextRecord
3902         mov     $context,%rsi           # context
3903         mov     \$154,%ecx              # sizeof(CONTEXT) in qwords
3904         .long   0xa548f3fc              # cld; rep movsq
3905
3906         mov     $disp,%rsi
3907         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
3908         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
3909         mov     0(%rsi),%r8             # arg3, disp->ControlPc
3910         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
3911         mov     40(%rsi),%r10           # disp->ContextRecord
3912         lea     56(%rsi),%r11           # &disp->HandlerData
3913         lea     24(%rsi),%r12           # &disp->EstablisherFrame
3914         mov     %r10,32(%rsp)           # arg5
3915         mov     %r11,40(%rsp)           # arg6
3916         mov     %r12,48(%rsp)           # arg7
3917         mov     %rcx,56(%rsp)           # arg8, (NULL)
3918         call    *__imp_RtlVirtualUnwind(%rip)
3919
3920         mov     \$1,%eax                # ExceptionContinueSearch
3921         add     \$64,%rsp
3922         popfq
3923         pop     %r15
3924         pop     %r14
3925         pop     %r13
3926         pop     %r12
3927         pop     %rbp
3928         pop     %rbx
3929         pop     %rdi
3930         pop     %rsi
3931         ret
3932 .size   avx_handler,.-avx_handler
3933
3934 .section        .pdata
3935 .align  4
3936         .rva    .LSEH_begin_poly1305_init
3937         .rva    .LSEH_end_poly1305_init
3938         .rva    .LSEH_info_poly1305_init
3939
3940         .rva    .LSEH_begin_poly1305_blocks
3941         .rva    .LSEH_end_poly1305_blocks
3942         .rva    .LSEH_info_poly1305_blocks
3943
3944         .rva    .LSEH_begin_poly1305_emit
3945         .rva    .LSEH_end_poly1305_emit
3946         .rva    .LSEH_info_poly1305_emit
3947 ___
3948 $code.=<<___ if ($avx);
3949         .rva    .LSEH_begin_poly1305_blocks_avx
3950         .rva    .Lbase2_64_avx
3951         .rva    .LSEH_info_poly1305_blocks_avx_1
3952
3953         .rva    .Lbase2_64_avx
3954         .rva    .Leven_avx
3955         .rva    .LSEH_info_poly1305_blocks_avx_2
3956
3957         .rva    .Leven_avx
3958         .rva    .LSEH_end_poly1305_blocks_avx
3959         .rva    .LSEH_info_poly1305_blocks_avx_3
3960
3961         .rva    .LSEH_begin_poly1305_emit_avx
3962         .rva    .LSEH_end_poly1305_emit_avx
3963         .rva    .LSEH_info_poly1305_emit_avx
3964 ___
3965 $code.=<<___ if ($avx>1);
3966         .rva    .LSEH_begin_poly1305_blocks_avx2
3967         .rva    .Lbase2_64_avx2
3968         .rva    .LSEH_info_poly1305_blocks_avx2_1
3969
3970         .rva    .Lbase2_64_avx2
3971         .rva    .Leven_avx2
3972         .rva    .LSEH_info_poly1305_blocks_avx2_2
3973
3974         .rva    .Leven_avx2
3975         .rva    .LSEH_end_poly1305_blocks_avx2
3976         .rva    .LSEH_info_poly1305_blocks_avx2_3
3977 ___
3978 $code.=<<___ if ($avx>2);
3979         .rva    .LSEH_begin_poly1305_blocks_avx512
3980         .rva    .LSEH_end_poly1305_blocks_avx512
3981         .rva    .LSEH_info_poly1305_blocks_avx512
3982 ___
3983 $code.=<<___;
3984 .section        .xdata
3985 .align  8
3986 .LSEH_info_poly1305_init:
3987         .byte   9,0,0,0
3988         .rva    se_handler
3989         .rva    .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
3990
3991 .LSEH_info_poly1305_blocks:
3992         .byte   9,0,0,0
3993         .rva    se_handler
3994         .rva    .Lblocks_body,.Lblocks_epilogue
3995
3996 .LSEH_info_poly1305_emit:
3997         .byte   9,0,0,0
3998         .rva    se_handler
3999         .rva    .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
4000 ___
4001 $code.=<<___ if ($avx);
4002 .LSEH_info_poly1305_blocks_avx_1:
4003         .byte   9,0,0,0
4004         .rva    se_handler
4005         .rva    .Lblocks_avx_body,.Lblocks_avx_epilogue         # HandlerData[]
4006
4007 .LSEH_info_poly1305_blocks_avx_2:
4008         .byte   9,0,0,0
4009         .rva    se_handler
4010         .rva    .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue     # HandlerData[]
4011
4012 .LSEH_info_poly1305_blocks_avx_3:
4013         .byte   9,0,0,0
4014         .rva    avx_handler
4015         .rva    .Ldo_avx_body,.Ldo_avx_epilogue                 # HandlerData[]
4016
4017 .LSEH_info_poly1305_emit_avx:
4018         .byte   9,0,0,0
4019         .rva    se_handler
4020         .rva    .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
4021 ___
4022 $code.=<<___ if ($avx>1);
4023 .LSEH_info_poly1305_blocks_avx2_1:
4024         .byte   9,0,0,0
4025         .rva    se_handler
4026         .rva    .Lblocks_avx2_body,.Lblocks_avx2_epilogue       # HandlerData[]
4027
4028 .LSEH_info_poly1305_blocks_avx2_2:
4029         .byte   9,0,0,0
4030         .rva    se_handler
4031         .rva    .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue   # HandlerData[]
4032
4033 .LSEH_info_poly1305_blocks_avx2_3:
4034         .byte   9,0,0,0
4035         .rva    avx_handler
4036         .rva    .Ldo_avx2_body,.Ldo_avx2_epilogue               # HandlerData[]
4037 ___
4038 $code.=<<___ if ($avx>2);
4039 .LSEH_info_poly1305_blocks_avx512:
4040         .byte   9,0,0,0
4041         .rva    avx_handler
4042         .rva    .Ldo_avx512_body,.Ldo_avx512_epilogue           # HandlerData[]
4043 ___
4044 }
4045
4046 foreach (split('\n',$code)) {
4047         s/\`([^\`]*)\`/eval($1)/ge;     # evaluate `...` expressions (constants)
4048         s/%r([a-z]+)#d/%e$1/g;          # %rax#d -> %eax and friends
4049         s/%r([0-9]+)#d/%r$1d/g;         # %r10#d -> %r10d and friends
4050         s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;    # pick register width, e.g. %x#%ymm0 -> %xmm0
4051
4052         print $_,"\n";
4053 }
4054 close STDOUT;