1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # This module implements Poly1305 hash for x86_64.
18 #
19 # March 2015
20 #
21 # Numbers are cycles per processed byte with poly1305_blocks alone,
22 # measured with rdtsc at fixed clock frequency.
23 #
24 #               IALU/gcc-4.8(*) AVX(**)         AVX2
25 # P4            4.46/+120%      -
26 # Core 2        2.41/+90%       -
27 # Westmere      1.88/+120%      -
28 # Sandy Bridge  1.39/+140%      1.10
29 # Haswell       1.14/+175%      1.11            0.65
30 # Skylake       1.13/+120%      0.96            0.51
31 # Silvermont    2.83/+95%       -
32 # VIA Nano      1.82/+150%      -
33 # Sledgehammer  1.38/+160%      -
34 # Bulldozer     2.30/+130%      0.97
35 #
36 # (*)   improvement coefficients relative to clang are more modest and
37 #       are ~50% on most processors; in both cases we are comparing to
38 #       __int128 code;
39 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
40 #       it was faster than the integer-only code only on older Intel P4 and
41 #       Core processors, by 30-50% (less so on newer processors), while
42 #       being slower on contemporary ones, e.g. almost 2x slower on Atom;
43 #       as the former are naturally disappearing, SSE2 is deemed unnecessary;
44
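# For reference, an illustrative summary of the math implemented below (a
# comment-only sketch, nothing here is emitted): the first half of the
# one-time key is clamped into r = r1*2^64+r0 by the masks in poly1305_init,
# and for every 16-byte block m the hash is updated as
#
#       h = (h + m + padbit*2^128) * r  mod  2^130-5
#
# while the final tag is (h + nonce) mod 2^128.  poly1305_init stores h=0 and
# the clamped r, poly1305_blocks implements the update, and poly1305_emit
# performs the final reduction and the nonce addition.
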
45 $flavour = shift;
46 $output  = shift;
47 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
48
49 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
50
51 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
52 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
53 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
54 die "can't locate x86_64-xlate.pl";
55
56 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
57                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
58         $avx = ($1>=2.19) + ($1>=2.22);
59 }
60
61 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
62            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
63         $avx = ($1>=2.09) + ($1>=2.10);
64 }
65
66 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
67            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
68         $avx = ($1>=10) + ($1>=12);
69 }
70
71 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
72         $avx = ($2>=3.0) + ($2>3.0);
73 }
74
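# The resulting $avx value selects which code paths are emitted below:
# 0 - integer-only, 1 - additionally the AVX path, 2 - additionally the AVX2
# path.  It is capped by what the assembler at hand is known to accept, as
# probed above (GNU as 2.19/2.22, nasm 2.09/2.10, ml64 10/12, clang/LLVM 3.0+).
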
75 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
76 *STDOUT=*OUT;
77
78 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
79 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
80 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
81 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
82
83 sub poly1305_iteration {
84 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
85 # output:       $h0-$h2 *= $r0-$r1
86 $code.=<<___;
87         mulq    $h0                     # h0*r1
88         mov     %rax,$d2
89          mov    $r0,%rax
90         mov     %rdx,$d3
91
92         mulq    $h0                     # h0*r0
93         mov     %rax,$h0                # future $h0
94          mov    $r0,%rax
95         mov     %rdx,$d1
96
97         mulq    $h1                     # h1*r0
98         add     %rax,$d2
99          mov    $s1,%rax
100         adc     %rdx,$d3
101
102         mulq    $h1                     # h1*s1
103          mov    $h2,$h1                 # borrow $h1
104         add     %rax,$h0
105         adc     %rdx,$d1
106
107         imulq   $s1,$h1                 # h2*s1
108         add     $h1,$d2
109          mov    $d1,$h1
110         adc     \$0,$d3
111
112         imulq   $r0,$h2                 # h2*r0
113         add     $d2,$h1
114         mov     \$-4,%rax               # mask value
115         adc     $h2,$d3
116
117         and     $d3,%rax                # last reduction step
118         mov     $d3,$h2
119         shr     \$2,$d3
120         and     \$3,$h2
121         add     $d3,%rax
122         add     %rax,$h0
123         adc     \$0,$h1
124         adc     \$0,$h2
125 ___
126 }
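
# For reference (comments only, nothing here is assembled): with
# h = h2*2^128 + h1*2^64 + h0 and r = r1*2^64 + r0, the four mulq and two
# imulq above compute the schoolbook product h*r, folding the high terms on
# the fly.  Since 2^130 = 5 mod p = 2^130-5 and r1 = 0 mod 4 thanks to key
# clamping, r1*2^128 = (r1/4)*2^130 = 5*(r1/4) = s1 mod p, so the h1*r1 and
# h2*r1 terms are taken as h1*s1 and h2*s1 two limbs lower.  The "last
# reduction step" then splits d3: its low 2 bits stay in h2, while each
# remaining unit is worth 2^130 and is folded back in as
# 5*(d3>>2) = (d3 & -4) + (d3>>2), leaving h only partially reduced.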
127
128 ########################################################################
129 # The layout of the opaque area is as follows.
130 #
131 #       unsigned __int64 h[3];          # current hash value base 2^64
132 #       unsigned __int64 r[2];          # key value base 2^64
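#
# As used by the integer-only path below this corresponds to byte offsets
# 0/8/16 for h[0..2] and 24/32 for r[0..1] from the context pointer; the two
# function pointers selected in poly1305_init are written through its third
# argument rather than into the context.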
133
134 $code.=<<___;
135 .text
136
137 .extern OPENSSL_ia32cap_P
138
139 .globl  poly1305_init
140 .hidden poly1305_init
141 .globl  poly1305_blocks
142 .hidden poly1305_blocks
143 .globl  poly1305_emit
144 .hidden poly1305_emit
145
146 .type   poly1305_init,\@function,3
147 .align  32
148 poly1305_init:
149         xor     %rax,%rax
150         mov     %rax,0($ctx)            # initialize hash value
151         mov     %rax,8($ctx)
152         mov     %rax,16($ctx)
153
154         cmp     \$0,$inp
155         je      .Lno_key
156
157         lea     poly1305_blocks(%rip),%r10
158         lea     poly1305_emit(%rip),%r11
159 ___
160 $code.=<<___    if ($avx);
161         mov     OPENSSL_ia32cap_P+4(%rip),%r9
162         lea     poly1305_blocks_avx(%rip),%rax
163         lea     poly1305_emit_avx(%rip),%rcx
164         bt      \$`60-32`,%r9           # AVX?
165         cmovc   %rax,%r10
166         cmovc   %rcx,%r11
167 ___
168 $code.=<<___    if ($avx>1);
169         lea     poly1305_blocks_avx2(%rip),%rax
170         bt      \$`5+32`,%r9            # AVX2?
171         cmovc   %rax,%r10
172 ___
173 $code.=<<___;
174         mov     \$0x0ffffffc0fffffff,%rax
175         mov     \$0x0ffffffc0ffffffc,%rcx
176         and     0($inp),%rax
177         and     8($inp),%rcx
178         mov     %rax,24($ctx)
179         mov     %rcx,32($ctx)
180 ___
181 $code.=<<___    if ($flavour !~ /elf32/);
182         mov     %r10,0(%rdx)
183         mov     %r11,8(%rdx)
184 ___
185 $code.=<<___    if ($flavour =~ /elf32/);
186         mov     %r10d,0(%rdx)
187         mov     %r11d,4(%rdx)
188 ___
189 $code.=<<___;
190         mov     \$1,%eax
191 .Lno_key:
192         ret
193 .size   poly1305_init,.-poly1305_init
194
195 .type   poly1305_blocks,\@function,4
196 .align  32
197 poly1305_blocks:
198 .Lblocks:
199         shr     \$4,$len
200         jz      .Lno_data               # too short
201
202         push    %rbx
203         push    %rbp
204         push    %r12
205         push    %r13
206         push    %r14
207         push    %r15
208 .Lblocks_body:
209
210         mov     $len,%r15               # reassign $len
211
212         mov     24($ctx),$r0            # load r
213         mov     32($ctx),$s1
214
215         mov     0($ctx),$h0             # load hash value
216         mov     8($ctx),$h1
217         mov     16($ctx),$h2
218
219         mov     $s1,$r1
220         shr     \$2,$s1
221         mov     $r1,%rax
222         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
223         jmp     .Loop
224
225 .align  32
226 .Loop:
227         add     0($inp),$h0             # accumulate input
228         adc     8($inp),$h1
229         lea     16($inp),$inp
230         adc     $padbit,$h2
231 ___
232         &poly1305_iteration();
233 $code.=<<___;
234         mov     $r1,%rax
235         dec     %r15                    # len-=16
236         jnz     .Loop
237
238         mov     $h0,0($ctx)             # store hash value
239         mov     $h1,8($ctx)
240         mov     $h2,16($ctx)
241
242         mov     0(%rsp),%r15
243         mov     8(%rsp),%r14
244         mov     16(%rsp),%r13
245         mov     24(%rsp),%r12
246         mov     32(%rsp),%rbp
247         mov     40(%rsp),%rbx
248         lea     48(%rsp),%rsp
249 .Lno_data:
250 .Lblocks_epilogue:
251         ret
252 .size   poly1305_blocks,.-poly1305_blocks
253
254 .type   poly1305_emit,\@function,3
255 .align  32
256 poly1305_emit:
257 .Lemit:
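        # Final-reduction sketch (comment only): on entry h may still exceed
        # 2^130-5 by a small amount.  h+5 is formed in r8:r9:r10; if it
        # reaches 2^130 (checked via r10>>2), then h >= 2^130-5 and the low
        # 128 bits of h+5 are exactly h mod 2^130-5, so they are selected
        # with cmovnz, otherwise the original h is kept.  Only 128 bits are
        # needed since the tag is reduced mod 2^128 anyway.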
258         mov     0($ctx),%r8     # load hash value
259         mov     8($ctx),%r9
260         mov     16($ctx),%r10
261
262         mov     %r8,%rax
263         add     \$5,%r8         # compare to modulus
264         mov     %r9,%rcx
265         adc     \$0,%r9
266         adc     \$0,%r10
267         shr     \$2,%r10        # did 130-bit value overflow?
268         cmovnz  %r8,%rax
269         cmovnz  %r9,%rcx
270
271         add     0($nonce),%rax  # accumulate nonce
272         adc     8($nonce),%rcx
273         mov     %rax,0($mac)    # write result
274         mov     %rcx,8($mac)
275
276         ret
277 .size   poly1305_emit,.-poly1305_emit
278 ___
279 if ($avx) {
280
281 ########################################################################
282 # The layout of the opaque area is as follows.
283 #
284 #       unsigned __int32 h[5];          # current hash value base 2^26
285 #       unsigned __int32 is_base2_26;
286 #       unsigned __int64 r[2];          # key value base 2^64
287 #       unsigned __int64 pad;
288 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
289 #
290 # where r^n are the base 2^26 digits of the powers of the multiplier key.
291 # There are 5 digits, but the last four are interleaved with their multiples
292 # of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
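#
# Concretely (an illustrative summary of what __poly1305_init_avx stores
# below): each of the 9 elements is 16 bytes, and its four 32-bit lanes hold
# the corresponding digit of r^2, r^1, r^4 and r^3 at byte offsets +0, +4,
# +8 and +12 respectively.  The interleaved 5*r_i entries exist because a
# digit product landing at or above 2^130 wraps around with a factor of 5
# (2^130 = 5 mod 2^130-5), so the vector loops can pick up 5*r_i directly
# instead of multiplying by 5 at run time.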
293
294 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
295     map("%xmm$_",(0..15));
296
297 $code.=<<___;
298 .type   __poly1305_block,\@abi-omnipotent
299 .align  32
300 __poly1305_block:
301 ___
302         &poly1305_iteration();
303 $code.=<<___;
304         ret
305 .size   __poly1305_block,.-__poly1305_block
306
307 .type   __poly1305_init_avx,\@abi-omnipotent
308 .align  32
309 __poly1305_init_avx:
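        # Comment-only overview: starting from the clamped key r0:r1, this
        # routine calls __poly1305_block three times to obtain r^2, r^3 and
        # r^4, splits each power (and r itself) into five 26-bit digits, and
        # stores them, together with 5x multiples of the upper four digits,
        # into the interleaved 9x16-byte table described above.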
310         mov     $r0,$h0
311         mov     $r1,$h1
312         xor     $h2,$h2
313
314         lea     48+64($ctx),$ctx        # size optimization
315
316         mov     $r1,%rax
317         call    __poly1305_block        # r^2
318
319         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
320         mov     \$0x3ffffff,%edx
321         mov     $h0,$d1
322         and     $h0#d,%eax
323         mov     $r0,$d2
324         and     $r0#d,%edx
325         mov     %eax,`16*0+0-64`($ctx)
326         shr     \$26,$d1
327         mov     %edx,`16*0+4-64`($ctx)
328         shr     \$26,$d2
329
330         mov     \$0x3ffffff,%eax
331         mov     \$0x3ffffff,%edx
332         and     $d1#d,%eax
333         and     $d2#d,%edx
334         mov     %eax,`16*1+0-64`($ctx)
335         lea     (%rax,%rax,4),%eax      # *5
336         mov     %edx,`16*1+4-64`($ctx)
337         lea     (%rdx,%rdx,4),%edx      # *5
338         mov     %eax,`16*2+0-64`($ctx)
339         shr     \$26,$d1
340         mov     %edx,`16*2+4-64`($ctx)
341         shr     \$26,$d2
342
343         mov     $h1,%rax
344         mov     $r1,%rdx
345         shl     \$12,%rax
346         shl     \$12,%rdx
347         or      $d1,%rax
348         or      $d2,%rdx
349         and     \$0x3ffffff,%eax
350         and     \$0x3ffffff,%edx
351         mov     %eax,`16*3+0-64`($ctx)
352         lea     (%rax,%rax,4),%eax      # *5
353         mov     %edx,`16*3+4-64`($ctx)
354         lea     (%rdx,%rdx,4),%edx      # *5
355         mov     %eax,`16*4+0-64`($ctx)
356         mov     $h1,$d1
357         mov     %edx,`16*4+4-64`($ctx)
358         mov     $r1,$d2
359
360         mov     \$0x3ffffff,%eax
361         mov     \$0x3ffffff,%edx
362         shr     \$14,$d1
363         shr     \$14,$d2
364         and     $d1#d,%eax
365         and     $d2#d,%edx
366         mov     %eax,`16*5+0-64`($ctx)
367         lea     (%rax,%rax,4),%eax      # *5
368         mov     %edx,`16*5+4-64`($ctx)
369         lea     (%rdx,%rdx,4),%edx      # *5
370         mov     %eax,`16*6+0-64`($ctx)
371         shr     \$26,$d1
372         mov     %edx,`16*6+4-64`($ctx)
373         shr     \$26,$d2
374
375         mov     $h2,%rax
376         shl     \$24,%rax
377         or      %rax,$d1
378         mov     $d1#d,`16*7+0-64`($ctx)
379         lea     ($d1,$d1,4),$d1         # *5
380         mov     $d2#d,`16*7+4-64`($ctx)
381         lea     ($d2,$d2,4),$d2         # *5
382         mov     $d1#d,`16*8+0-64`($ctx)
383         mov     $d2#d,`16*8+4-64`($ctx)
384
385         mov     $r1,%rax
386         call    __poly1305_block        # r^3
387
388         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
389         mov     $h0,$d1
390         and     $h0#d,%eax
391         shr     \$26,$d1
392         mov     %eax,`16*0+12-64`($ctx)
393
394         mov     \$0x3ffffff,%edx
395         and     $d1#d,%edx
396         mov     %edx,`16*1+12-64`($ctx)
397         lea     (%rdx,%rdx,4),%edx      # *5
398         shr     \$26,$d1
399         mov     %edx,`16*2+12-64`($ctx)
400
401         mov     $h1,%rax
402         shl     \$12,%rax
403         or      $d1,%rax
404         and     \$0x3ffffff,%eax
405         mov     %eax,`16*3+12-64`($ctx)
406         lea     (%rax,%rax,4),%eax      # *5
407         mov     $h1,$d1
408         mov     %eax,`16*4+12-64`($ctx)
409
410         mov     \$0x3ffffff,%edx
411         shr     \$14,$d1
412         and     $d1#d,%edx
413         mov     %edx,`16*5+12-64`($ctx)
414         lea     (%rdx,%rdx,4),%edx      # *5
415         shr     \$26,$d1
416         mov     %edx,`16*6+12-64`($ctx)
417
418         mov     $h2,%rax
419         shl     \$24,%rax
420         or      %rax,$d1
421         mov     $d1#d,`16*7+12-64`($ctx)
422         lea     ($d1,$d1,4),$d1         # *5
423         mov     $d1#d,`16*8+12-64`($ctx)
424
425         mov     $r1,%rax
426         call    __poly1305_block        # r^4
427
428         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
429         mov     $h0,$d1
430         and     $h0#d,%eax
431         shr     \$26,$d1
432         mov     %eax,`16*0+8-64`($ctx)
433
434         mov     \$0x3ffffff,%edx
435         and     $d1#d,%edx
436         mov     %edx,`16*1+8-64`($ctx)
437         lea     (%rdx,%rdx,4),%edx      # *5
438         shr     \$26,$d1
439         mov     %edx,`16*2+8-64`($ctx)
440
441         mov     $h1,%rax
442         shl     \$12,%rax
443         or      $d1,%rax
444         and     \$0x3ffffff,%eax
445         mov     %eax,`16*3+8-64`($ctx)
446         lea     (%rax,%rax,4),%eax      # *5
447         mov     $h1,$d1
448         mov     %eax,`16*4+8-64`($ctx)
449
450         mov     \$0x3ffffff,%edx
451         shr     \$14,$d1
452         and     $d1#d,%edx
453         mov     %edx,`16*5+8-64`($ctx)
454         lea     (%rdx,%rdx,4),%edx      # *5
455         shr     \$26,$d1
456         mov     %edx,`16*6+8-64`($ctx)
457
458         mov     $h2,%rax
459         shl     \$24,%rax
460         or      %rax,$d1
461         mov     $d1#d,`16*7+8-64`($ctx)
462         lea     ($d1,$d1,4),$d1         # *5
463         mov     $d1#d,`16*8+8-64`($ctx)
464
465         lea     -48-64($ctx),$ctx       # size [de-]optimization
466         ret
467 .size   __poly1305_init_avx,.-__poly1305_init_avx
468
469 .type   poly1305_blocks_avx,\@function,4
470 .align  32
471 poly1305_blocks_avx:
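        # Dispatch sketch (comment only): inputs shorter than 128 bytes whose
        # hash is still kept in base 2^64 are handed to the integer-only
        # .Lblocks.  Otherwise a hash still in base 2^64 goes through
        # .Lbase2_64_avx, which also sets up the r-power table, and whenever
        # the length is not a multiple of 32 a single block is processed with
        # the scalar code first, so that the vector loop always sees pairs of
        # blocks.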
472         mov     20($ctx),%r8d           # is_base2_26
473         cmp     \$128,$len
474         jae     .Lblocks_avx
475         test    %r8d,%r8d
476         jz      .Lblocks
477
478 .Lblocks_avx:
479         and     \$-16,$len
480         jz      .Lno_data_avx
481
482         vzeroupper
483
484         test    %r8d,%r8d
485         jz      .Lbase2_64_avx
486
487         test    \$31,$len
488         jz      .Leven_avx
489
490         push    %rbx
491         push    %rbp
492         push    %r12
493         push    %r13
494         push    %r14
495         push    %r15
496 .Lblocks_avx_body:
497
498         mov     $len,%r15               # reassign $len
499
500         mov     0($ctx),$d1             # load hash value
501         mov     8($ctx),$d2
502         mov     16($ctx),$h2#d
503
504         mov     24($ctx),$r0            # load r
505         mov     32($ctx),$s1
506
507         ################################# base 2^26 -> base 2^64
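        # (The base 2^26 value being repacked below is
        #  h[0] + h[1]*2^26 + h[2]*2^52 + h[3]*2^78 + h[4]*2^104, with
        #  h[0],h[1] loaded as one 64-bit word in d1, h[2],h[3] in d2 and
        #  h[4] in h2.)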
508         mov     $d1#d,$h0#d
509         and     \$`-1*(1<<31)`,$d1
510         mov     $d2,$r1                 # borrow $r1
511         mov     $d2#d,$h1#d
512         and     \$`-1*(1<<31)`,$d2
513
514         shr     \$6,$d1
515         shl     \$52,$r1
516         add     $d1,$h0
517         shr     \$12,$h1
518         shr     \$18,$d2
519         add     $r1,$h0
520         adc     $d2,$h1
521
522         mov     $h2,$d1
523         shl     \$40,$d1
524         shr     \$24,$h2
525         add     $d1,$h1
526         adc     \$0,$h2                 # can be partially reduced...
527
528         mov     \$-4,$d2                # ... so reduce
529         mov     $h2,$d1
530         and     $h2,$d2
531         shr     \$2,$d1
532         and     \$3,$h2
533         add     $d2,$d1                 # =*5
534         add     $d1,$h0
535         adc     \$0,$h1
536         adc     \$0,$h2
537
538         mov     $s1,$r1
539         mov     $s1,%rax
540         shr     \$2,$s1
541         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
542
543         add     0($inp),$h0             # accumulate input
544         adc     8($inp),$h1
545         lea     16($inp),$inp
546         adc     $padbit,$h2
547
548         call    __poly1305_block
549
550         test    $padbit,$padbit         # if $padbit is zero,
551         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
552
553         ################################# base 2^64 -> base 2^26
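        # (Comment only: the 130-bit value h2:h1:h0 is split into five 26-bit
        #  digits, h[i] = (h >> 26*i) & 0x3ffffff for i=0..3 and
        #  h[4] = h >> 104, which the shift/and/or sequence below leaves in
        #  rax, rdx, h0, h1 and h2 respectively.)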
554         mov     $h0,%rax
555         mov     $h0,%rdx
556         shr     \$52,$h0
557         mov     $h1,$r0
558         mov     $h1,$r1
559         shr     \$26,%rdx
560         and     \$0x3ffffff,%rax        # h[0]
561         shl     \$12,$r0
562         and     \$0x3ffffff,%rdx        # h[1]
563         shr     \$14,$h1
564         or      $r0,$h0
565         shl     \$24,$h2
566         and     \$0x3ffffff,$h0         # h[2]
567         shr     \$40,$r1
568         and     \$0x3ffffff,$h1         # h[3]
569         or      $r1,$h2                 # h[4]
570
571         sub     \$16,%r15
572         jz      .Lstore_base2_26_avx
573
574         vmovd   %rax#d,$H0
575         vmovd   %rdx#d,$H1
576         vmovd   $h0#d,$H2
577         vmovd   $h1#d,$H3
578         vmovd   $h2#d,$H4
579         jmp     .Lproceed_avx
580
581 .align  32
582 .Lstore_base2_64_avx:
583         mov     $h0,0($ctx)
584         mov     $h1,8($ctx)
585         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
586         jmp     .Ldone_avx
587
588 .align  16
589 .Lstore_base2_26_avx:
590         mov     %rax#d,0($ctx)          # store hash value base 2^26
591         mov     %rdx#d,4($ctx)
592         mov     $h0#d,8($ctx)
593         mov     $h1#d,12($ctx)
594         mov     $h2#d,16($ctx)
595 .align  16
596 .Ldone_avx:
597         mov     0(%rsp),%r15
598         mov     8(%rsp),%r14
599         mov     16(%rsp),%r13
600         mov     24(%rsp),%r12
601         mov     32(%rsp),%rbp
602         mov     40(%rsp),%rbx
603         lea     48(%rsp),%rsp
604 .Lno_data_avx:
605 .Lblocks_avx_epilogue:
606         ret
607
608 .align  32
609 .Lbase2_64_avx:
610         push    %rbx
611         push    %rbp
612         push    %r12
613         push    %r13
614         push    %r14
615         push    %r15
616 .Lbase2_64_avx_body:
617
618         mov     $len,%r15               # reassign $len
619
620         mov     24($ctx),$r0            # load r
621         mov     32($ctx),$s1
622
623         mov     0($ctx),$h0             # load hash value
624         mov     8($ctx),$h1
625         mov     16($ctx),$h2#d
626
627         mov     $s1,$r1
628         mov     $s1,%rax
629         shr     \$2,$s1
630         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
631
632         test    \$31,$len
633         jz      .Linit_avx
634
635         add     0($inp),$h0             # accumulate input
636         adc     8($inp),$h1
637         lea     16($inp),$inp
638         adc     $padbit,$h2
639         sub     \$16,%r15
640
641         call    __poly1305_block
642
643 .Linit_avx:
644         ################################# base 2^64 -> base 2^26
645         mov     $h0,%rax
646         mov     $h0,%rdx
647         shr     \$52,$h0
648         mov     $h1,$d1
649         mov     $h1,$d2
650         shr     \$26,%rdx
651         and     \$0x3ffffff,%rax        # h[0]
652         shl     \$12,$d1
653         and     \$0x3ffffff,%rdx        # h[1]
654         shr     \$14,$h1
655         or      $d1,$h0
656         shl     \$24,$h2
657         and     \$0x3ffffff,$h0         # h[2]
658         shr     \$40,$d2
659         and     \$0x3ffffff,$h1         # h[3]
660         or      $d2,$h2                 # h[4]
661
662         vmovd   %rax#d,$H0
663         vmovd   %rdx#d,$H1
664         vmovd   $h0#d,$H2
665         vmovd   $h1#d,$H3
666         vmovd   $h2#d,$H4
667         movl    \$1,20($ctx)            # set is_base2_26
668
669         call    __poly1305_init_avx
670
671 .Lproceed_avx:
672         mov     %r15,$len
673
674         mov     0(%rsp),%r15
675         mov     8(%rsp),%r14
676         mov     16(%rsp),%r13
677         mov     24(%rsp),%r12
678         mov     32(%rsp),%rbp
679         mov     40(%rsp),%rbx
680         lea     48(%rsp),%rax
681         lea     48(%rsp),%rsp
682 .Lbase2_64_avx_epilogue:
683         jmp     .Ldo_avx
684
685 .align  32
686 .Leven_avx:
687         vmovd           4*0($ctx),$H0           # load hash value
688         vmovd           4*1($ctx),$H1
689         vmovd           4*2($ctx),$H2
690         vmovd           4*3($ctx),$H3
691         vmovd           4*4($ctx),$H4
692
693 .Ldo_avx:
694 ___
695 $code.=<<___    if (!$win64);
696         lea             -0x58(%rsp),%r11
697         sub             \$0x178,%rsp
698 ___
699 $code.=<<___    if ($win64);
700         lea             -0xf8(%rsp),%r11
701         sub             \$0x218,%rsp
702         vmovdqa         %xmm6,0x50(%r11)
703         vmovdqa         %xmm7,0x60(%r11)
704         vmovdqa         %xmm8,0x70(%r11)
705         vmovdqa         %xmm9,0x80(%r11)
706         vmovdqa         %xmm10,0x90(%r11)
707         vmovdqa         %xmm11,0xa0(%r11)
708         vmovdqa         %xmm12,0xb0(%r11)
709         vmovdqa         %xmm13,0xc0(%r11)
710         vmovdqa         %xmm14,0xd0(%r11)
711         vmovdqa         %xmm15,0xe0(%r11)
712 .Ldo_avx_body:
713 ___
714 $code.=<<___;
715         sub             \$64,$len
716         lea             -32($inp),%rax
717         cmovc           %rax,$inp
718
719         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
720         lea             `16*3+64`($ctx),$ctx    # size optimization
721         lea             .Lconst(%rip),%rcx
722
723         ################################################################
724         # load input
725         vmovdqu         16*2($inp),$T0
726         vmovdqu         16*3($inp),$T1
727         vmovdqa         64(%rcx),$MASK          # .Lmask26
728
729         vpsrldq         \$6,$T0,$T2             # splat input
730         vpsrldq         \$6,$T1,$T3
731         vpunpckhqdq     $T1,$T0,$T4             # 4
732         vpunpcklqdq     $T1,$T0,$T0             # 0:1
733         vpunpcklqdq     $T3,$T2,$T3             # 2:3
734
735         vpsrlq          \$40,$T4,$T4            # 4
736         vpsrlq          \$26,$T0,$T1
737         vpand           $MASK,$T0,$T0           # 0
738         vpsrlq          \$4,$T3,$T2
739         vpand           $MASK,$T1,$T1           # 1
740         vpsrlq          \$30,$T3,$T3
741         vpand           $MASK,$T2,$T2           # 2
742         vpand           $MASK,$T3,$T3           # 3
743         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
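        # At this point T0..T4 hold the five 26-bit digits of the two 16-byte
        # blocks just loaded, one block per 64-bit lane, with the pad bit
        # (bit 128 of a block, i.e. bit 24 of the top digit) OR-ed in from
        # the constant at 32(%rcx).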
744
745         jbe             .Lskip_loop_avx
746
747         # expand and copy pre-calculated table to stack
748         vmovdqu         `16*1-64`($ctx),$D1
749         vmovdqu         `16*2-64`($ctx),$D2
750         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
751         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
752         vmovdqa         $D3,-0x90(%r11)
753         vmovdqa         $D0,0x00(%rsp)
754         vpshufd         \$0xEE,$D1,$D4
755         vmovdqu         `16*3-64`($ctx),$D0
756         vpshufd         \$0x44,$D1,$D1
757         vmovdqa         $D4,-0x80(%r11)
758         vmovdqa         $D1,0x10(%rsp)
759         vpshufd         \$0xEE,$D2,$D3
760         vmovdqu         `16*4-64`($ctx),$D1
761         vpshufd         \$0x44,$D2,$D2
762         vmovdqa         $D3,-0x70(%r11)
763         vmovdqa         $D2,0x20(%rsp)
764         vpshufd         \$0xEE,$D0,$D4
765         vmovdqu         `16*5-64`($ctx),$D2
766         vpshufd         \$0x44,$D0,$D0
767         vmovdqa         $D4,-0x60(%r11)
768         vmovdqa         $D0,0x30(%rsp)
769         vpshufd         \$0xEE,$D1,$D3
770         vmovdqu         `16*6-64`($ctx),$D0
771         vpshufd         \$0x44,$D1,$D1
772         vmovdqa         $D3,-0x50(%r11)
773         vmovdqa         $D1,0x40(%rsp)
774         vpshufd         \$0xEE,$D2,$D4
775         vmovdqu         `16*7-64`($ctx),$D1
776         vpshufd         \$0x44,$D2,$D2
777         vmovdqa         $D4,-0x40(%r11)
778         vmovdqa         $D2,0x50(%rsp)
779         vpshufd         \$0xEE,$D0,$D3
780         vmovdqu         `16*8-64`($ctx),$D2
781         vpshufd         \$0x44,$D0,$D0
782         vmovdqa         $D3,-0x30(%r11)
783         vmovdqa         $D0,0x60(%rsp)
784         vpshufd         \$0xEE,$D1,$D4
785         vpshufd         \$0x44,$D1,$D1
786         vmovdqa         $D4,-0x20(%r11)
787         vmovdqa         $D1,0x70(%rsp)
788         vpshufd         \$0xEE,$D2,$D3
789          vmovdqa        0x00(%rsp),$D4          # preload r0^2
790         vpshufd         \$0x44,$D2,$D2
791         vmovdqa         $D3,-0x10(%r11)
792         vmovdqa         $D2,0x80(%rsp)
793
794         jmp             .Loop_avx
795
796 .align  32
797 .Loop_avx:
798         ################################################################
799         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
800         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
801         #   \___________________/
802         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
803         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
804         #   \___________________/ \____________________/
805         #
806         # Note that we start with inp[2:3]*r^2. This is because it
807 # does not depend on the reduction in the previous iteration.
808         ################################################################
809         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
810         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
811         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
812         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
813         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
814         #
815         # though note that $Tx and $Hx are "reversed" in this section,
816         # and $D4 is preloaded with r0^2...
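        # (Why the 5*r terms: a product of base 2^26 digits h_i*r_j belongs
        #  at digit i+j; for i+j >= 5 that position is worth 2^130 times
        #  digit i+j-5, and 2^130 = 5 mod 2^130-5, so such terms are folded
        #  in as h_i*(5*r_j) at digit i+j-5, which is what the precomputed
        #  s-values provide.)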
817
818         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
819         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
820           vmovdqa       $H2,0x20(%r11)                          # offload hash
821         vpmuludq        $T2,$D4,$D2             # d3 = h2*r0
822          vmovdqa        0x10(%rsp),$H2          # r1^2
823         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
824         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
825
826           vmovdqa       $H0,0x00(%r11)                          #
827         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
828           vmovdqa       $H1,0x10(%r11)                          #
829         vpmuludq        $T3,$H2,$H1             # h3*r1
830         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
831         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
832           vmovdqa       $H3,0x30(%r11)                          #
833         vpmuludq        $T2,$H2,$H0             # h2*r1
834         vpmuludq        $T1,$H2,$H1             # h1*r1
835         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
836          vmovdqa        0x30(%rsp),$H3          # r2^2
837         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
838           vmovdqa       $H4,0x40(%r11)                          #
839         vpmuludq        $T0,$H2,$H2             # h0*r1
840          vpmuludq       $T2,$H3,$H0             # h2*r2
841         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
842
843          vmovdqa        0x40(%rsp),$H4          # s2^2
844         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
845         vpmuludq        $T1,$H3,$H1             # h1*r2
846         vpmuludq        $T0,$H3,$H3             # h0*r2
847         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
848          vmovdqa        0x50(%rsp),$H2          # r3^2
849         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
850         vpmuludq        $T4,$H4,$H0             # h4*s2
851         vpmuludq        $T3,$H4,$H4             # h3*s2
852         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
853          vmovdqa        0x60(%rsp),$H3          # s3^2
854         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
855
856          vmovdqa        0x80(%rsp),$H4          # s4^2
857         vpmuludq        $T1,$H2,$H1             # h1*r3
858         vpmuludq        $T0,$H2,$H2             # h0*r3
859         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
860         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
861         vpmuludq        $T4,$H3,$H0             # h4*s3
862         vpmuludq        $T3,$H3,$H1             # h3*s3
863         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
864          vmovdqu        16*0($inp),$H0                          # load input
865         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
866         vpmuludq        $T2,$H3,$H3             # h2*s3
867          vpmuludq       $T2,$H4,$T2             # h2*s4
868         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
869
870          vmovdqu        16*1($inp),$H1                          #
871         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
872         vpmuludq        $T3,$H4,$T3             # h3*s4
873         vpmuludq        $T4,$H4,$T4             # h4*s4
874          vpsrldq        \$6,$H0,$H2                             # splat input
875         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
876         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
877          vpsrldq        \$6,$H1,$H3                             #
878         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
879         vpmuludq        $T1,$H4,$T0             # h1*s4
880          vpunpckhqdq    $H1,$H0,$H4             # 4
881         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
882          vmovdqa        -0x90(%r11),$T4         # r0^4
883         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
884
885         vpunpcklqdq     $H1,$H0,$H0             # 0:1
886         vpunpcklqdq     $H3,$H2,$H3             # 2:3
887
888         #vpsrlq         \$40,$H4,$H4            # 4
889         vpsrldq         \$`40/8`,$H4,$H4        # 4
890         vpsrlq          \$26,$H0,$H1
891         vpand           $MASK,$H0,$H0           # 0
892         vpsrlq          \$4,$H3,$H2
893         vpand           $MASK,$H1,$H1           # 1
894         vpand           0(%rcx),$H4,$H4         # .Lmask24
895         vpsrlq          \$30,$H3,$H3
896         vpand           $MASK,$H2,$H2           # 2
897         vpand           $MASK,$H3,$H3           # 3
898         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
899
900         vpaddq          0x00(%r11),$H0,$H0      # add hash value
901         vpaddq          0x10(%r11),$H1,$H1
902         vpaddq          0x20(%r11),$H2,$H2
903         vpaddq          0x30(%r11),$H3,$H3
904         vpaddq          0x40(%r11),$H4,$H4
905
906         lea             16*2($inp),%rax
907         lea             16*4($inp),$inp
908         sub             \$64,$len
909         cmovc           %rax,$inp
910
911         ################################################################
912         # Now we accumulate (inp[0:1]+hash)*r^4
913         ################################################################
914         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
915         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
916         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
917         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
918         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
919
920         vpmuludq        $H0,$T4,$T0             # h0*r0
921         vpmuludq        $H1,$T4,$T1             # h1*r0
922         vpaddq          $T0,$D0,$D0
923         vpaddq          $T1,$D1,$D1
924          vmovdqa        -0x80(%r11),$T2         # r1^4
925         vpmuludq        $H2,$T4,$T0             # h2*r0
926         vpmuludq        $H3,$T4,$T1             # h3*r0
927         vpaddq          $T0,$D2,$D2
928         vpaddq          $T1,$D3,$D3
929         vpmuludq        $H4,$T4,$T4             # h4*r0
930          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
931         vpaddq          $T4,$D4,$D4
932
933         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
934         vpmuludq        $H2,$T2,$T1             # h2*r1
935         vpmuludq        $H3,$T2,$T0             # h3*r1
936         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
937          vmovdqa        -0x60(%r11),$T3         # r2^4
938         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
939         vpmuludq        $H1,$T2,$T1             # h1*r1
940         vpmuludq        $H0,$T2,$T2             # h0*r1
941         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
942         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
943
944          vmovdqa        -0x50(%r11),$T4         # s2^4
945         vpmuludq        $H2,$T3,$T0             # h2*r2
946         vpmuludq        $H1,$T3,$T1             # h1*r2
947         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
948         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
949          vmovdqa        -0x40(%r11),$T2         # r3^4
950         vpmuludq        $H0,$T3,$T3             # h0*r2
951         vpmuludq        $H4,$T4,$T0             # h4*s2
952         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
953         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
954          vmovdqa        -0x30(%r11),$T3         # s3^4
955         vpmuludq        $H3,$T4,$T4             # h3*s2
956          vpmuludq       $H1,$T2,$T1             # h1*r3
957         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
958
959          vmovdqa        -0x10(%r11),$T4         # s4^4
960         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
961         vpmuludq        $H0,$T2,$T2             # h0*r3
962         vpmuludq        $H4,$T3,$T0             # h4*s3
963         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
964         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
965          vmovdqu        16*2($inp),$T0                          # load input
966         vpmuludq        $H3,$T3,$T2             # h3*s3
967         vpmuludq        $H2,$T3,$T3             # h2*s3
968         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
969          vmovdqu        16*3($inp),$T1                          #
970         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
971
972         vpmuludq        $H2,$T4,$H2             # h2*s4
973         vpmuludq        $H3,$T4,$H3             # h3*s4
974          vpsrldq        \$6,$T0,$T2                             # splat input
975         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
976         vpmuludq        $H4,$T4,$H4             # h4*s4
977          vpsrldq        \$6,$T1,$T3                             #
978         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
979         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
980         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
981         vpmuludq        $H1,$T4,$H0
982          vpunpckhqdq    $T1,$T0,$T4             # 4
983         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
984         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
985
986         vpunpcklqdq     $T1,$T0,$T0             # 0:1
987         vpunpcklqdq     $T3,$T2,$T3             # 2:3
988
989         #vpsrlq         \$40,$T4,$T4            # 4
990         vpsrldq         \$`40/8`,$T4,$T4        # 4
991         vpsrlq          \$26,$T0,$T1
992          vmovdqa        0x00(%rsp),$D4          # preload r0^2
993         vpand           $MASK,$T0,$T0           # 0
994         vpsrlq          \$4,$T3,$T2
995         vpand           $MASK,$T1,$T1           # 1
996         vpand           0(%rcx),$T4,$T4         # .Lmask24
997         vpsrlq          \$30,$T3,$T3
998         vpand           $MASK,$T2,$T2           # 2
999         vpand           $MASK,$T3,$T3           # 3
1000         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1001
1002         ################################################################
1003         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
1004         # and P. Schwabe
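        # (Comment only: the limbs in each 64-bit lane may now be well over
        #  26 bits, so carries are propagated, in interleaved order, h3->h4,
        #  h0->h1, h4->h0, h1->h2, h2->h3, and then once more h0->h1 and
        #  h3->h4.  The h4->h0 step multiplies the carry by 5, computed as
        #  c + 4*c via the vpaddq/vpsllq pair, again because 2^130 = 5 mod
        #  2^130-5; afterwards every limb is small enough for the next
        #  iteration's products to accumulate without overflowing 64 bits.)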
1005
1006         vpsrlq          \$26,$H3,$D3
1007         vpand           $MASK,$H3,$H3
1008         vpaddq          $D3,$H4,$H4             # h3 -> h4
1009
1010         vpsrlq          \$26,$H0,$D0
1011         vpand           $MASK,$H0,$H0
1012         vpaddq          $D0,$D1,$H1             # h0 -> h1
1013
1014         vpsrlq          \$26,$H4,$D0
1015         vpand           $MASK,$H4,$H4
1016
1017         vpsrlq          \$26,$H1,$D1
1018         vpand           $MASK,$H1,$H1
1019         vpaddq          $D1,$H2,$H2             # h1 -> h2
1020
1021         vpaddq          $D0,$H0,$H0
1022         vpsllq          \$2,$D0,$D0
1023         vpaddq          $D0,$H0,$H0             # h4 -> h0
1024
1025         vpsrlq          \$26,$H2,$D2
1026         vpand           $MASK,$H2,$H2
1027         vpaddq          $D2,$H3,$H3             # h2 -> h3
1028
1029         vpsrlq          \$26,$H0,$D0
1030         vpand           $MASK,$H0,$H0
1031         vpaddq          $D0,$H1,$H1             # h0 -> h1
1032
1033         vpsrlq          \$26,$H3,$D3
1034         vpand           $MASK,$H3,$H3
1035         vpaddq          $D3,$H4,$H4             # h3 -> h4
1036
1037         ja              .Loop_avx
1038
1039 .Lskip_loop_avx:
1040         ################################################################
1041         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1042
1043         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1044         add             \$32,$len
1045         jnz             .Long_tail_avx
1046
1047         vpaddq          $H2,$T2,$T2
1048         vpaddq          $H0,$T0,$T0
1049         vpaddq          $H1,$T1,$T1
1050         vpaddq          $H3,$T3,$T3
1051         vpaddq          $H4,$T4,$T4
1052
1053 .Long_tail_avx:
1054         vmovdqa         $H2,0x20(%r11)
1055         vmovdqa         $H0,0x00(%r11)
1056         vmovdqa         $H1,0x10(%r11)
1057         vmovdqa         $H3,0x30(%r11)
1058         vmovdqa         $H4,0x40(%r11)
1059
1060         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1061         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1062         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1063         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1064         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1065
1066         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1067         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1068          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1069         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1070         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1071         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1072
1073         vpmuludq        $T3,$H2,$H0             # h3*r1
1074         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1075          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1076         vpmuludq        $T2,$H2,$H1             # h2*r1
1077         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1078          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1079         vpmuludq        $T1,$H2,$H0             # h1*r1
1080         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1081         vpmuludq        $T0,$H2,$H2             # h0*r1
1082         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1083         vpmuludq        $T4,$H3,$H3             # h4*s1
1084         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1085
1086          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1087         vpmuludq        $T2,$H4,$H1             # h2*r2
1088         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1089         vpmuludq        $T1,$H4,$H0             # h1*r2
1090         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1091          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1092         vpmuludq        $T0,$H4,$H4             # h0*r2
1093         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1094         vpmuludq        $T4,$H2,$H1             # h4*s2
1095         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1096          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1097         vpmuludq        $T3,$H2,$H2             # h3*s2
1098         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1099
1100         vpmuludq        $T1,$H3,$H0             # h1*r3
1101         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1102         vpmuludq        $T0,$H3,$H3             # h0*r3
1103         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1104          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1105         vpmuludq        $T4,$H4,$H1             # h4*s3
1106         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1107          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1108         vpmuludq        $T3,$H4,$H0             # h3*s3
1109         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1110         vpmuludq        $T2,$H4,$H4             # h2*s3
1111         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1112
1113         vpmuludq        $T0,$H2,$H2             # h0*r4
1114         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1115         vpmuludq        $T4,$H3,$H1             # h4*s4
1116         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1117         vpmuludq        $T3,$H3,$H0             # h3*s4
1118         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1119         vpmuludq        $T2,$H3,$H1             # h2*s4
1120         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1121         vpmuludq        $T1,$H3,$H3             # h1*s4
1122         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1123
1124         jz              .Lshort_tail_avx
1125
1126         vmovdqu         16*0($inp),$H0          # load input
1127         vmovdqu         16*1($inp),$H1
1128
1129         vpsrldq         \$6,$H0,$H2             # splat input
1130         vpsrldq         \$6,$H1,$H3
1131         vpunpckhqdq     $H1,$H0,$H4             # 4
1132         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1133         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1134
1135         vpsrlq          \$40,$H4,$H4            # 4
1136         vpsrlq          \$26,$H0,$H1
1137         vpand           $MASK,$H0,$H0           # 0
1138         vpsrlq          \$4,$H3,$H2
1139         vpand           $MASK,$H1,$H1           # 1
1140         vpsrlq          \$30,$H3,$H3
1141         vpand           $MASK,$H2,$H2           # 2
1142         vpand           $MASK,$H3,$H3           # 3
1143         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1144
1145         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1146         vpaddq          0x00(%r11),$H0,$H0
1147         vpaddq          0x10(%r11),$H1,$H1
1148         vpaddq          0x20(%r11),$H2,$H2
1149         vpaddq          0x30(%r11),$H3,$H3
1150         vpaddq          0x40(%r11),$H4,$H4
1151
1152         ################################################################
1153         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1154
1155         vpmuludq        $H0,$T4,$T0             # h0*r0
1156         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1157         vpmuludq        $H1,$T4,$T1             # h1*r0
1158         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1159         vpmuludq        $H2,$T4,$T0             # h2*r0
1160         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1161          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1162         vpmuludq        $H3,$T4,$T1             # h3*r0
1163         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1164         vpmuludq        $H4,$T4,$T4             # h4*r0
1165         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1166
1167         vpmuludq        $H3,$T2,$T0             # h3*r1
1168         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1169          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1170         vpmuludq        $H2,$T2,$T1             # h2*r1
1171         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1172          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1173         vpmuludq        $H1,$T2,$T0             # h1*r1
1174         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1175         vpmuludq        $H0,$T2,$T2             # h0*r1
1176         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1177         vpmuludq        $H4,$T3,$T3             # h4*s1
1178         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1179
1180          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1181         vpmuludq        $H2,$T4,$T1             # h2*r2
1182         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1183         vpmuludq        $H1,$T4,$T0             # h1*r2
1184         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1185          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1186         vpmuludq        $H0,$T4,$T4             # h0*r2
1187         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1188         vpmuludq        $H4,$T2,$T1             # h4*s2
1189         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1190          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1191         vpmuludq        $H3,$T2,$T2             # h3*s2
1192         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1193
1194         vpmuludq        $H1,$T3,$T0             # h1*r3
1195         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1196         vpmuludq        $H0,$T3,$T3             # h0*r3
1197         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1198          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1199         vpmuludq        $H4,$T4,$T1             # h4*s3
1200         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1201          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1202         vpmuludq        $H3,$T4,$T0             # h3*s3
1203         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1204         vpmuludq        $H2,$T4,$T4             # h2*s3
1205         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1206
1207         vpmuludq        $H0,$T2,$T2             # h0*r4
1208         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1209         vpmuludq        $H4,$T3,$T1             # h4*s4
1210         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1211         vpmuludq        $H3,$T3,$T0             # h3*s4
1212         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1213         vpmuludq        $H2,$T3,$T1             # h2*s4
1214         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1215         vpmuludq        $H1,$T3,$T3             # h1*s4
1216         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1217
1218 .Lshort_tail_avx:
1219         ################################################################
1220         # horizontal addition
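        # (Each accumulator still holds two independent partial sums, one per
        #  64-bit lane; the vpsrldq/vpaddq pairs below add the upper lane
        #  onto the lower one, leaving the combined base 2^26 limbs in the
        #  low halves of D0..D4.)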
1221
1222         vpsrldq         \$8,$D4,$T4
1223         vpsrldq         \$8,$D3,$T3
1224         vpsrldq         \$8,$D1,$T1
1225         vpsrldq         \$8,$D0,$T0
1226         vpsrldq         \$8,$D2,$T2
1227         vpaddq          $T3,$D3,$D3
1228         vpaddq          $T4,$D4,$D4
1229         vpaddq          $T0,$D0,$D0
1230         vpaddq          $T1,$D1,$D1
1231         vpaddq          $T2,$D2,$D2
1232
1233         ################################################################
1234         # lazy reduction
1235
1236         vpsrlq          \$26,$D3,$H3
1237         vpand           $MASK,$D3,$D3
1238         vpaddq          $H3,$D4,$D4             # h3 -> h4
1239
1240         vpsrlq          \$26,$D0,$H0
1241         vpand           $MASK,$D0,$D0
1242         vpaddq          $H0,$D1,$D1             # h0 -> h1
1243
1244         vpsrlq          \$26,$D4,$H4
1245         vpand           $MASK,$D4,$D4
1246
1247         vpsrlq          \$26,$D1,$H1
1248         vpand           $MASK,$D1,$D1
1249         vpaddq          $H1,$D2,$D2             # h1 -> h2
1250
1251         vpaddq          $H4,$D0,$D0
1252         vpsllq          \$2,$H4,$H4
1253         vpaddq          $H4,$D0,$D0             # h4 -> h0
1254
1255         vpsrlq          \$26,$D2,$H2
1256         vpand           $MASK,$D2,$D2
1257         vpaddq          $H2,$D3,$D3             # h2 -> h3
1258
1259         vpsrlq          \$26,$D0,$H0
1260         vpand           $MASK,$D0,$D0
1261         vpaddq          $H0,$D1,$D1             # h0 -> h1
1262
1263         vpsrlq          \$26,$D3,$H3
1264         vpand           $MASK,$D3,$D3
1265         vpaddq          $H3,$D4,$D4             # h3 -> h4
1266
1267         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1268         vmovd           $D1,`4*1-48-64`($ctx)
1269         vmovd           $D2,`4*2-48-64`($ctx)
1270         vmovd           $D3,`4*3-48-64`($ctx)
1271         vmovd           $D4,`4*4-48-64`($ctx)
1272 ___
1273 $code.=<<___    if ($win64);
1274         vmovdqa         0x50(%r11),%xmm6
1275         vmovdqa         0x60(%r11),%xmm7
1276         vmovdqa         0x70(%r11),%xmm8
1277         vmovdqa         0x80(%r11),%xmm9
1278         vmovdqa         0x90(%r11),%xmm10
1279         vmovdqa         0xa0(%r11),%xmm11
1280         vmovdqa         0xb0(%r11),%xmm12
1281         vmovdqa         0xc0(%r11),%xmm13
1282         vmovdqa         0xd0(%r11),%xmm14
1283         vmovdqa         0xe0(%r11),%xmm15
1284         lea             0xf8(%r11),%rsp
1285 .Ldo_avx_epilogue:
1286 ___
1287 $code.=<<___    if (!$win64);
1288         lea             0x58(%r11),%rsp
1289 ___
1290 $code.=<<___;
1291         vzeroupper
1292         ret
1293 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1294
1295 .type   poly1305_emit_avx,\@function,3
1296 .align  32
1297 poly1305_emit_avx:
1298         cmpl    \$0,20($ctx)    # is_base2_26?
1299         je      .Lemit
1300
1301         mov     0($ctx),%eax    # load hash value base 2^26
1302         mov     4($ctx),%ecx
1303         mov     8($ctx),%r8d
1304         mov     12($ctx),%r11d
1305         mov     16($ctx),%r10d
1306
1307         shl     \$26,%rcx       # base 2^26 -> base 2^64
1308         mov     %r8,%r9
1309         shl     \$52,%r8
1310         add     %rcx,%rax
1311         shr     \$12,%r9
1312         add     %rax,%r8        # h0
1313         adc     \$0,%r9
1314
1315         shl     \$14,%r11
1316         mov     %r10,%rax
1317         shr     \$24,%r10
1318         add     %r11,%r9
1319         shl     \$40,%rax
1320         add     %rax,%r9        # h1
1321         adc     \$0,%r10        # h2
1322
1323         mov     %r10,%rax       # could be partially reduced, so reduce
1324         mov     %r10,%rcx
1325         and     \$3,%r10
1326         shr     \$2,%rax
1327         and     \$-4,%rcx
1328         add     %rcx,%rax
1329         add     %rax,%r8
1330         adc     \$0,%r9
1331         adc     \$0,%r10
1332
1333         mov     %r8,%rax
1334         add     \$5,%r8         # compare to modulus
1335         mov     %r9,%rcx
1336         adc     \$0,%r9
1337         adc     \$0,%r10
1338         shr     \$2,%r10        # did 130-bit value overflow?
1339         cmovnz  %r8,%rax
1340         cmovnz  %r9,%rcx
1341
1342         add     0($nonce),%rax  # accumulate nonce
1343         adc     8($nonce),%rcx
1344         mov     %rax,0($mac)    # write result
1345         mov     %rcx,8($mac)
1346
1347         ret
1348 .size   poly1305_emit_avx,.-poly1305_emit_avx
1349 ___
1350
1351 if ($avx>1) {
1352 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1353     map("%ymm$_",(0..15));
1354 my $S4=$MASK;
1355
1356 $code.=<<___;
1357 .type   poly1305_blocks_avx2,\@function,4
1358 .align  32
1359 poly1305_blocks_avx2:
1360         mov     20($ctx),%r8d           # is_base2_26
1361         cmp     \$128,$len
1362         jae     .Lblocks_avx2
1363         test    %r8d,%r8d
1364         jz      .Lblocks
1365
1366 .Lblocks_avx2:
1367         and     \$-16,$len
1368         jz      .Lno_data_avx2
1369
1370         vzeroupper
1371
1372         test    %r8d,%r8d
1373         jz      .Lbase2_64_avx2
1374
1375         test    \$63,$len
1376         jz      .Leven_avx2
1377
1378         push    %rbx
1379         push    %rbp
1380         push    %r12
1381         push    %r13
1382         push    %r14
1383         push    %r15
1384 .Lblocks_avx2_body:
1385
1386         mov     $len,%r15               # reassign $len
1387
1388         mov     0($ctx),$d1             # load hash value
1389         mov     8($ctx),$d2
1390         mov     16($ctx),$h2#d
1391
1392         mov     24($ctx),$r0            # load r
1393         mov     32($ctx),$s1
1394
1395         ################################# base 2^26 -> base 2^64
1396         mov     $d1#d,$h0#d
1397         and     \$`-1*(1<<31)`,$d1
1398         mov     $d2,$r1                 # borrow $r1
1399         mov     $d2#d,$h1#d
1400         and     \$`-1*(1<<31)`,$d2
1401
1402         shr     \$6,$d1
1403         shl     \$52,$r1
1404         add     $d1,$h0
1405         shr     \$12,$h1
1406         shr     \$18,$d2
1407         add     $r1,$h0
1408         adc     $d2,$h1
1409
1410         mov     $h2,$d1
1411         shl     \$40,$d1
1412         shr     \$24,$h2
1413         add     $d1,$h1
1414         adc     \$0,$h2                 # can be partially reduced...
1415
1416         mov     \$-4,$d2                # ... so reduce
1417         mov     $h2,$d1
1418         and     $h2,$d2
1419         shr     \$2,$d1
1420         and     \$3,$h2
1421         add     $d2,$d1                 # =*5
1422         add     $d1,$h0
1423         adc     \$0,$h1
1424         adc     \$0,$h2
1425
1426         mov     $s1,$r1
1427         mov     $s1,%rax
1428         shr     \$2,$s1
1429         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1430
1431 .Lbase2_26_pre_avx2:
1432         add     0($inp),$h0             # accumulate input
1433         adc     8($inp),$h1
1434         lea     16($inp),$inp
1435         adc     $padbit,$h2
1436         sub     \$16,%r15
1437
1438         call    __poly1305_block
1439         mov     $r1,%rax
1440
1441         test    \$63,%r15
1442         jnz     .Lbase2_26_pre_avx2
1443
1444         test    $padbit,$padbit         # if $padbit is zero,
1445         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1446
1447         ################################# base 2^64 -> base 2^26
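             # h[i] = (h >> 26*i) & 0x3ffffff for i = 0..3, h[4] = h >> 104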
1448         mov     $h0,%rax
1449         mov     $h0,%rdx
1450         shr     \$52,$h0
1451         mov     $h1,$r0
1452         mov     $h1,$r1
1453         shr     \$26,%rdx
1454         and     \$0x3ffffff,%rax        # h[0]
1455         shl     \$12,$r0
1456         and     \$0x3ffffff,%rdx        # h[1]
1457         shr     \$14,$h1
1458         or      $r0,$h0
1459         shl     \$24,$h2
1460         and     \$0x3ffffff,$h0         # h[2]
1461         shr     \$40,$r1
1462         and     \$0x3ffffff,$h1         # h[3]
1463         or      $r1,$h2                 # h[4]
1464
1465         test    %r15,%r15
1466         jz      .Lstore_base2_26_avx2
1467
1468         vmovd   %rax#d,%x#$H0
1469         vmovd   %rdx#d,%x#$H1
1470         vmovd   $h0#d,%x#$H2
1471         vmovd   $h1#d,%x#$H3
1472         vmovd   $h2#d,%x#$H4
1473         jmp     .Lproceed_avx2
1474
1475 .align  32
1476 .Lstore_base2_64_avx2:
1477         mov     $h0,0($ctx)
1478         mov     $h1,8($ctx)
1479         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1480         jmp     .Ldone_avx2
1481
1482 .align  16
1483 .Lstore_base2_26_avx2:
1484         mov     %rax#d,0($ctx)          # store hash value base 2^26
1485         mov     %rdx#d,4($ctx)
1486         mov     $h0#d,8($ctx)
1487         mov     $h1#d,12($ctx)
1488         mov     $h2#d,16($ctx)
1489 .align  16
1490 .Ldone_avx2:
1491         mov     0(%rsp),%r15
1492         mov     8(%rsp),%r14
1493         mov     16(%rsp),%r13
1494         mov     24(%rsp),%r12
1495         mov     32(%rsp),%rbp
1496         mov     40(%rsp),%rbx
1497         lea     48(%rsp),%rsp
1498 .Lno_data_avx2:
1499 .Lblocks_avx2_epilogue:
1500         ret
1501
1502 .align  32
1503 .Lbase2_64_avx2:
1504         push    %rbx
1505         push    %rbp
1506         push    %r12
1507         push    %r13
1508         push    %r14
1509         push    %r15
1510 .Lbase2_64_avx2_body:
1511
1512         mov     $len,%r15               # reassign $len
1513
1514         mov     24($ctx),$r0            # load r
1515         mov     32($ctx),$s1
1516
1517         mov     0($ctx),$h0             # load hash value
1518         mov     8($ctx),$h1
1519         mov     16($ctx),$h2#d
1520
1521         mov     $s1,$r1
1522         mov     $s1,%rax
1523         shr     \$2,$s1
1524         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1525
1526         test    \$63,$len
1527         jz      .Linit_avx2
1528
1529 .Lbase2_64_pre_avx2:
1530         add     0($inp),$h0             # accumulate input
1531         adc     8($inp),$h1
1532         lea     16($inp),$inp
1533         adc     $padbit,$h2
1534         sub     \$16,%r15
1535
1536         call    __poly1305_block
1537         mov     $r1,%rax
1538
1539         test    \$63,%r15
1540         jnz     .Lbase2_64_pre_avx2
1541
1542 .Linit_avx2:
1543         ################################# base 2^64 -> base 2^26
1544         mov     $h0,%rax
1545         mov     $h0,%rdx
1546         shr     \$52,$h0
1547         mov     $h1,$d1
1548         mov     $h1,$d2
1549         shr     \$26,%rdx
1550         and     \$0x3ffffff,%rax        # h[0]
1551         shl     \$12,$d1
1552         and     \$0x3ffffff,%rdx        # h[1]
1553         shr     \$14,$h1
1554         or      $d1,$h0
1555         shl     \$24,$h2
1556         and     \$0x3ffffff,$h0         # h[2]
1557         shr     \$40,$d2
1558         and     \$0x3ffffff,$h1         # h[3]
1559         or      $d2,$h2                 # h[4]
1560
1561         vmovd   %rax#d,%x#$H0
1562         vmovd   %rdx#d,%x#$H1
1563         vmovd   $h0#d,%x#$H2
1564         vmovd   $h1#d,%x#$H3
1565         vmovd   $h2#d,%x#$H4
1566         movl    \$1,20($ctx)            # set is_base2_26
1567
1568         call    __poly1305_init_avx
1569
1570 .Lproceed_avx2:
1571         mov     %r15,$len
1572
1573         mov     0(%rsp),%r15
1574         mov     8(%rsp),%r14
1575         mov     16(%rsp),%r13
1576         mov     24(%rsp),%r12
1577         mov     32(%rsp),%rbp
1578         mov     40(%rsp),%rbx
1579         lea     48(%rsp),%rax
1580         lea     48(%rsp),%rsp
1581 .Lbase2_64_avx2_epilogue:
1582         jmp     .Ldo_avx2
1583
1584 .align  32
1585 .Leven_avx2:
1586         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1587         vmovd           4*1($ctx),%x#$H1
1588         vmovd           4*2($ctx),%x#$H2
1589         vmovd           4*3($ctx),%x#$H3
1590         vmovd           4*4($ctx),%x#$H4
1591
1592 .Ldo_avx2:
1593 ___
1594 $code.=<<___    if (!$win64);
1595         lea             -8(%rsp),%r11
1596         sub             \$0x128,%rsp
1597 ___
1598 $code.=<<___    if ($win64);
1599         lea             -0xf8(%rsp),%r11
1600         sub             \$0x1c8,%rsp
1601         vmovdqa         %xmm6,0x50(%r11)
1602         vmovdqa         %xmm7,0x60(%r11)
1603         vmovdqa         %xmm8,0x70(%r11)
1604         vmovdqa         %xmm9,0x80(%r11)
1605         vmovdqa         %xmm10,0x90(%r11)
1606         vmovdqa         %xmm11,0xa0(%r11)
1607         vmovdqa         %xmm12,0xb0(%r11)
1608         vmovdqa         %xmm13,0xc0(%r11)
1609         vmovdqa         %xmm14,0xd0(%r11)
1610         vmovdqa         %xmm15,0xe0(%r11)
1611 .Ldo_avx2_body:
1612 ___
1613 $code.=<<___;
1614         lea             48+64($ctx),$ctx        # size optimization
1615         lea             .Lconst(%rip),%rcx
1616
1617         # expand and copy pre-calculated table to stack
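             # (the pre-computed powers of r are splatted to 256 bits and
             #  parked on a 512-byte-aligned stack area, so the loop below can
             #  fetch them with aligned 32-byte loads and the tail can re-read
             #  them at a 4-byte displacement)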
1618         vmovdqu         `16*0-64`($ctx),%x#$T2
1619         and             \$-512,%rsp
1620         vmovdqu         `16*1-64`($ctx),%x#$T3
1621         vmovdqu         `16*2-64`($ctx),%x#$T4
1622         vmovdqu         `16*3-64`($ctx),%x#$D0
1623         vmovdqu         `16*4-64`($ctx),%x#$D1
1624         vmovdqu         `16*5-64`($ctx),%x#$D2
1625         vmovdqu         `16*6-64`($ctx),%x#$D3
1626         vpermq          \$0x15,$T2,$T2          # 00003412 -> 12343434
1627         vmovdqu         `16*7-64`($ctx),%x#$D4
1628         vpermq          \$0x15,$T3,$T3
1629         vpshufd         \$0xc8,$T2,$T2          # 12343434 -> 14243444
1630         vmovdqu         `16*8-64`($ctx),%x#$MASK
1631         vpermq          \$0x15,$T4,$T4
1632         vpshufd         \$0xc8,$T3,$T3
1633         vmovdqa         $T2,0x00(%rsp)
1634         vpermq          \$0x15,$D0,$D0
1635         vpshufd         \$0xc8,$T4,$T4
1636         vmovdqa         $T3,0x20(%rsp)
1637         vpermq          \$0x15,$D1,$D1
1638         vpshufd         \$0xc8,$D0,$D0
1639         vmovdqa         $T4,0x40(%rsp)
1640         vpermq          \$0x15,$D2,$D2
1641         vpshufd         \$0xc8,$D1,$D1
1642         vmovdqa         $D0,0x60(%rsp)
1643         vpermq          \$0x15,$D3,$D3
1644         vpshufd         \$0xc8,$D2,$D2
1645         vmovdqa         $D1,0x80(%rsp)
1646         vpermq          \$0x15,$D4,$D4
1647         vpshufd         \$0xc8,$D3,$D3
1648         vmovdqa         $D2,0xa0(%rsp)
1649         vpermq          \$0x15,$MASK,$MASK
1650         vpshufd         \$0xc8,$D4,$D4
1651         vmovdqa         $D3,0xc0(%rsp)
1652         vpshufd         \$0xc8,$MASK,$MASK
1653         vmovdqa         $D4,0xe0(%rsp)
1654         vmovdqa         $MASK,0x100(%rsp)
1655         vmovdqa         64(%rcx),$MASK          # .Lmask26
1656
1657         ################################################################
1658         # load input
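             # (two 32-byte loads gather four 16-byte blocks; the unpack/shift/
             #  mask sequence below splits each block into five 26-bit limbs,
             #  and the 2^128 padbit is ORed into the top limb)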
1659         vmovdqu         16*0($inp),%x#$T0
1660         vmovdqu         16*1($inp),%x#$T1
1661         vinserti128     \$1,16*2($inp),$T0,$T0
1662         vinserti128     \$1,16*3($inp),$T1,$T1
1663         lea             16*4($inp),$inp
1664
1665         vpsrldq         \$6,$T0,$T2             # splat input
1666         vpsrldq         \$6,$T1,$T3
1667         vpunpckhqdq     $T1,$T0,$T4             # 4
1668         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1669         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1670
1671         vpsrlq          \$30,$T2,$T3
1672         vpsrlq          \$4,$T2,$T2
1673         vpsrlq          \$26,$T0,$T1
1674         vpsrlq          \$40,$T4,$T4            # 4
1675         vpand           $MASK,$T2,$T2           # 2
1676         vpand           $MASK,$T0,$T0           # 0
1677         vpand           $MASK,$T1,$T1           # 1
1678         vpand           $MASK,$T3,$T3           # 3
1679         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1680
1681         lea             0x90(%rsp),%rax         # size optimization
1682         vpaddq          $H2,$T2,$H2             # accumulate input
1683         sub             \$64,$len
1684         jz              .Ltail_avx2
1685         jmp             .Loop_avx2
1686
1687 .align  32
1688 .Loop_avx2:
1689         ################################################################
1690         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1691         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1692         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1693         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1694         #   \________/\__________/
1695         ################################################################
1696         #vpaddq         $H2,$T2,$H2             # accumulate input
1697         vpaddq          $H0,$T0,$H0
1698         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1699         vpaddq          $H1,$T1,$H1
1700         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1701         vpaddq          $H3,$T3,$H3
1702         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1703         vpaddq          $H4,$T4,$H4
1704         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1705         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1706
1707         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1708         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1709         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1710         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1711         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1712         #
1713         # however, as h2 is "chronologically" the first one available, the
1714         # corresponding operations are pulled up, so it's
1715         #
1716         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1717         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1718         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1719         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1720         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
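             # (s_i denotes 5*r_i: limb products that land at or above 2^130
             #  are folded back using 2^130 mod p = 5, hence the factors of 5)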
1721
1722         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1723         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1724         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1725         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1726         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1727
1728         vpmuludq        $H0,$T1,$T4             # h0*r1
1729         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1730         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1731         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1732         vpmuludq        $H3,$T1,$T4             # h3*r1
1733         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1734         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1735         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1736          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1737
1738         vpmuludq        $H0,$T0,$T4             # h0*r0
1739         vpmuludq        $H1,$T0,$H2             # h1*r0
1740         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1741         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1742         vpmuludq        $H3,$T0,$T4             # h3*r0
1743         vpmuludq        $H4,$T0,$H2             # h4*r0
1744          vmovdqu        16*0($inp),%x#$T0       # load input
1745         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1746         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1747          vinserti128    \$1,16*2($inp),$T0,$T0
1748
1749         vpmuludq        $H3,$T1,$T4             # h3*s2
1750         vpmuludq        $H4,$T1,$H2             # h4*s2
1751          vmovdqu        16*1($inp),%x#$T1
1752         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1753         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1754          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1755         vpmuludq        $H1,$T2,$T4             # h1*r2
1756         vpmuludq        $H0,$T2,$T2             # h0*r2
1757         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1758         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1759          vinserti128    \$1,16*3($inp),$T1,$T1
1760          lea            16*4($inp),$inp
1761
1762         vpmuludq        $H1,$H2,$T4             # h1*r3
1763         vpmuludq        $H0,$H2,$H2             # h0*r3
1764          vpsrldq        \$6,$T0,$T2             # splat input
1765         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1766         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1767         vpmuludq        $H3,$T3,$T4             # h3*s3
1768         vpmuludq        $H4,$T3,$H2             # h4*s3
1769          vpsrldq        \$6,$T1,$T3
1770         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1771         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1772          vpunpckhqdq    $T1,$T0,$T4             # 4
1773
1774         vpmuludq        $H3,$S4,$H3             # h3*s4
1775         vpmuludq        $H4,$S4,$H4             # h4*s4
1776          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1777         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1778         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1779          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1780         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1781         vpmuludq        $H1,$S4,$H0             # h1*s4
1782         vmovdqa         64(%rcx),$MASK          # .Lmask26
1783         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1784         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1785
1786         ################################################################
1787         # lazy reduction (interleaved with tail of input splat)
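             # every limb's bits above 26 are carried into the next limb; the
             # h4 carry wraps around to h0 multiplied by 5 (again 2^130 mod p
             # = 5), and the limbs are left only partially reduced, which is
             # sufficient input for the next round of multiplications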
1788
1789         vpsrlq          \$26,$H3,$D3
1790         vpand           $MASK,$H3,$H3
1791         vpaddq          $D3,$H4,$H4             # h3 -> h4
1792
1793         vpsrlq          \$26,$H0,$D0
1794         vpand           $MASK,$H0,$H0
1795         vpaddq          $D0,$D1,$H1             # h0 -> h1
1796
1797         vpsrlq          \$26,$H4,$D4
1798         vpand           $MASK,$H4,$H4
1799
1800          vpsrlq         \$4,$T3,$T2
1801
1802         vpsrlq          \$26,$H1,$D1
1803         vpand           $MASK,$H1,$H1
1804         vpaddq          $D1,$H2,$H2             # h1 -> h2
1805
1806         vpaddq          $D4,$H0,$H0
1807         vpsllq          \$2,$D4,$D4
1808         vpaddq          $D4,$H0,$H0             # h4 -> h0
1809
1810          vpand          $MASK,$T2,$T2           # 2
1811          vpsrlq         \$26,$T0,$T1
1812
1813         vpsrlq          \$26,$H2,$D2
1814         vpand           $MASK,$H2,$H2
1815         vpaddq          $D2,$H3,$H3             # h2 -> h3
1816
1817          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1818          vpsrlq         \$30,$T3,$T3
1819
1820         vpsrlq          \$26,$H0,$D0
1821         vpand           $MASK,$H0,$H0
1822         vpaddq          $D0,$H1,$H1             # h0 -> h1
1823
1824          vpsrlq         \$40,$T4,$T4            # 4
1825
1826         vpsrlq          \$26,$H3,$D3
1827         vpand           $MASK,$H3,$H3
1828         vpaddq          $D3,$H4,$H4             # h3 -> h4
1829
1830          vpand          $MASK,$T0,$T0           # 0
1831          vpand          $MASK,$T1,$T1           # 1
1832          vpand          $MASK,$T3,$T3           # 3
1833          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1834
1835         sub             \$64,$len
1836         jnz             .Loop_avx2
1837
1838         .byte           0x66,0x90               # 2-byte nop
1839 .Ltail_avx2:
1840         ################################################################
1841         # while the above multiplications were by r^4 in all lanes, in the
1842         # last iteration the least significant lane is multiplied by r^4
1843         # and the most significant one by r, so this is a copy of the above
1844         # except that references to the precomputed table are displaced by 4...
1845
1846         #vpaddq         $H2,$T2,$H2             # accumulate input
1847         vpaddq          $H0,$T0,$H0
1848         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1849         vpaddq          $H1,$T1,$H1
1850         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1851         vpaddq          $H3,$T3,$H3
1852         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1853         vpaddq          $H4,$T4,$H4
1854         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1855         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1856
1857         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1858         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1859         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1860         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1861         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1862
1863         vpmuludq        $H0,$T1,$T4             # h0*r1
1864         vpmuludq        $H1,$T1,$H2             # h1*r1
1865         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1866         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1867         vpmuludq        $H3,$T1,$T4             # h3*r1
1868         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1869         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1870         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1871
1872         vpmuludq        $H0,$T0,$T4             # h0*r0
1873         vpmuludq        $H1,$T0,$H2             # h1*r0
1874         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1875          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1876         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1877         vpmuludq        $H3,$T0,$T4             # h3*r0
1878         vpmuludq        $H4,$T0,$H2             # h4*r0
1879         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1880         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1881
1882         vpmuludq        $H3,$T1,$T4             # h3*s2
1883         vpmuludq        $H4,$T1,$H2             # h4*s2
1884         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1885         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1886          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1887         vpmuludq        $H1,$T2,$T4             # h1*r2
1888         vpmuludq        $H0,$T2,$T2             # h0*r2
1889         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1890         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1891
1892         vpmuludq        $H1,$H2,$T4             # h1*r3
1893         vpmuludq        $H0,$H2,$H2             # h0*r3
1894         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1895         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1896         vpmuludq        $H3,$T3,$T4             # h3*s3
1897         vpmuludq        $H4,$T3,$H2             # h4*s3
1898         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1899         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1900
1901         vpmuludq        $H3,$S4,$H3             # h3*s4
1902         vpmuludq        $H4,$S4,$H4             # h4*s4
1903         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1904         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1905         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
1906         vpmuludq        $H1,$S4,$H0             # h1*s4
1907         vmovdqa         64(%rcx),$MASK          # .Lmask26
1908         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1909         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1910
1911         ################################################################
1912         # horizontal addition
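             # the four parallel 64-bit lanes of each accumulator are summed
             # into lane 0: the vpsrldq/vpaddq pairs fold the odd qword of each
             # 128-bit half onto the even one, then vpermq/vpaddq fold the
             # upper 128-bit half onto the lower one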
1913
1914         vpsrldq         \$8,$D1,$T1
1915         vpsrldq         \$8,$H2,$T2
1916         vpsrldq         \$8,$H3,$T3
1917         vpsrldq         \$8,$H4,$T4
1918         vpsrldq         \$8,$H0,$T0
1919         vpaddq          $T1,$D1,$D1
1920         vpaddq          $T2,$H2,$H2
1921         vpaddq          $T3,$H3,$H3
1922         vpaddq          $T4,$H4,$H4
1923         vpaddq          $T0,$H0,$H0
1924
1925         vpermq          \$0x2,$H3,$T3
1926         vpermq          \$0x2,$H4,$T4
1927         vpermq          \$0x2,$H0,$T0
1928         vpermq          \$0x2,$D1,$T1
1929         vpermq          \$0x2,$H2,$T2
1930         vpaddq          $T3,$H3,$H3
1931         vpaddq          $T4,$H4,$H4
1932         vpaddq          $T0,$H0,$H0
1933         vpaddq          $T1,$D1,$D1
1934         vpaddq          $T2,$H2,$H2
1935
1936         ################################################################
1937         # lazy reduction
1938
1939         vpsrlq          \$26,$H3,$D3
1940         vpand           $MASK,$H3,$H3
1941         vpaddq          $D3,$H4,$H4             # h3 -> h4
1942
1943         vpsrlq          \$26,$H0,$D0
1944         vpand           $MASK,$H0,$H0
1945         vpaddq          $D0,$D1,$H1             # h0 -> h1
1946
1947         vpsrlq          \$26,$H4,$D4
1948         vpand           $MASK,$H4,$H4
1949
1950         vpsrlq          \$26,$H1,$D1
1951         vpand           $MASK,$H1,$H1
1952         vpaddq          $D1,$H2,$H2             # h1 -> h2
1953
1954         vpaddq          $D4,$H0,$H0
1955         vpsllq          \$2,$D4,$D4
1956         vpaddq          $D4,$H0,$H0             # h4 -> h0
1957
1958         vpsrlq          \$26,$H2,$D2
1959         vpand           $MASK,$H2,$H2
1960         vpaddq          $D2,$H3,$H3             # h2 -> h3
1961
1962         vpsrlq          \$26,$H0,$D0
1963         vpand           $MASK,$H0,$H0
1964         vpaddq          $D0,$H1,$H1             # h0 -> h1
1965
1966         vpsrlq          \$26,$H3,$D3
1967         vpand           $MASK,$H3,$H3
1968         vpaddq          $D3,$H4,$H4             # h3 -> h4
1969
1970         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
1971         vmovd           %x#$H1,`4*1-48-64`($ctx)
1972         vmovd           %x#$H2,`4*2-48-64`($ctx)
1973         vmovd           %x#$H3,`4*3-48-64`($ctx)
1974         vmovd           %x#$H4,`4*4-48-64`($ctx)
1975 ___
1976 $code.=<<___    if ($win64);
1977         vmovdqa         0x50(%r11),%xmm6
1978         vmovdqa         0x60(%r11),%xmm7
1979         vmovdqa         0x70(%r11),%xmm8
1980         vmovdqa         0x80(%r11),%xmm9
1981         vmovdqa         0x90(%r11),%xmm10
1982         vmovdqa         0xa0(%r11),%xmm11
1983         vmovdqa         0xb0(%r11),%xmm12
1984         vmovdqa         0xc0(%r11),%xmm13
1985         vmovdqa         0xd0(%r11),%xmm14
1986         vmovdqa         0xe0(%r11),%xmm15
1987         lea             0xf8(%r11),%rsp
1988 .Ldo_avx2_epilogue:
1989 ___
1990 $code.=<<___    if (!$win64);
1991         lea             8(%r11),%rsp
1992 ___
1993 $code.=<<___;
1994         vzeroupper
1995         ret
1996 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
1997 ___
1998 }
1999 $code.=<<___;
2000 .align  64
2001 .Lconst:
2002 .Lmask24:
2003 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
2004 .L129:
2005 .long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
2006 .Lmask26:
2007 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
2008 .Lfive:
2009 .long   5,0,5,0,5,0,5,0
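# .Lmask26, loaded via 64(%rcx) above, is the 26-bit limb mask; .L129, ORed in
# via 32(%rcx), supplies the 2^128 padbit encoded as 1<<24 in the top limb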
2010 ___
2011 }
2012
2013 $code.=<<___;
2014 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2015 .align  16
2016 ___
2017
2018 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2019 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
2020 if ($win64) {
2021 $rec="%rcx";
2022 $frame="%rdx";
2023 $context="%r8";
2024 $disp="%r9";
2025
2026 $code.=<<___;
2027 .extern __imp_RtlVirtualUnwind
2028 .type   se_handler,\@abi-omnipotent
2029 .align  16
2030 se_handler:
2031         push    %rsi
2032         push    %rdi
2033         push    %rbx
2034         push    %rbp
2035         push    %r12
2036         push    %r13
2037         push    %r14
2038         push    %r15
2039         pushfq
2040         sub     \$64,%rsp
2041
2042         mov     120($context),%rax      # pull context->Rax
2043         mov     248($context),%rbx      # pull context->Rip
2044
2045         mov     8($disp),%rsi           # disp->ImageBase
2046         mov     56($disp),%r11          # disp->HandlerData
2047
2048         mov     0(%r11),%r10d           # HandlerData[0]
2049         lea     (%rsi,%r10),%r10        # prologue label
2050         cmp     %r10,%rbx               # context->Rip<.Lprologue
2051         jb      .Lcommon_seh_tail
2052
2053         mov     152($context),%rax      # pull context->Rsp
2054
2055         mov     4(%r11),%r10d           # HandlerData[1]
2056         lea     (%rsi,%r10),%r10        # epilogue label
2057         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
2058         jae     .Lcommon_seh_tail
2059
2060         lea     48(%rax),%rax
2061
2062         mov     -8(%rax),%rbx
2063         mov     -16(%rax),%rbp
2064         mov     -24(%rax),%r12
2065         mov     -32(%rax),%r13
2066         mov     -40(%rax),%r14
2067         mov     -48(%rax),%r15
2068         mov     %rbx,144($context)      # restore context->Rbx
2069         mov     %rbp,160($context)      # restore context->Rbp
2070         mov     %r12,216($context)      # restore context->R12
2071         mov     %r13,224($context)      # restore context->R13
2072         mov     %r14,232($context)      # restore context->R14
2073         mov     %r15,240($context)      # restore context->R15
2074
2075         jmp     .Lcommon_seh_tail
2076 .size   se_handler,.-se_handler
2077
2078 .type   avx_handler,\@abi-omnipotent
2079 .align  16
2080 avx_handler:
2081         push    %rsi
2082         push    %rdi
2083         push    %rbx
2084         push    %rbp
2085         push    %r12
2086         push    %r13
2087         push    %r14
2088         push    %r15
2089         pushfq
2090         sub     \$64,%rsp
2091
2092         mov     120($context),%rax      # pull context->Rax
2093         mov     248($context),%rbx      # pull context->Rip
2094
2095         mov     8($disp),%rsi           # disp->ImageBase
2096         mov     56($disp),%r11          # disp->HandlerData
2097
2098         mov     0(%r11),%r10d           # HandlerData[0]
2099         lea     (%rsi,%r10),%r10        # prologue label
2100         cmp     %r10,%rbx               # context->Rip<prologue label
2101         jb      .Lcommon_seh_tail
2102
2103         mov     152($context),%rax      # pull context->Rsp
2104
2105         mov     4(%r11),%r10d           # HandlerData[1]
2106         lea     (%rsi,%r10),%r10        # epilogue label
2107         cmp     %r10,%rbx               # context->Rip>=epilogue label
2108         jae     .Lcommon_seh_tail
2109
2110         mov     208($context),%rax      # pull context->R11
2111
2112         lea     0x50(%rax),%rsi
2113         lea     0xf8(%rax),%rax
2114         lea     512($context),%rdi      # &context.Xmm6
2115         mov     \$20,%ecx
2116         .long   0xa548f3fc              # cld; rep movsq
2117
2118 .Lcommon_seh_tail:
2119         mov     8(%rax),%rdi
2120         mov     16(%rax),%rsi
2121         mov     %rax,152($context)      # restore context->Rsp
2122         mov     %rsi,168($context)      # restore context->Rsi
2123         mov     %rdi,176($context)      # restore context->Rdi
2124
2125         mov     40($disp),%rdi          # disp->ContextRecord
2126         mov     $context,%rsi           # context
2127         mov     \$154,%ecx              # sizeof(CONTEXT)
2128         .long   0xa548f3fc              # cld; rep movsq
2129
2130         mov     $disp,%rsi
2131         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
2132         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
2133         mov     0(%rsi),%r8             # arg3, disp->ControlPc
2134         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
2135         mov     40(%rsi),%r10           # disp->ContextRecord
2136         lea     56(%rsi),%r11           # &disp->HandlerData
2137         lea     24(%rsi),%r12           # &disp->EstablisherFrame
2138         mov     %r10,32(%rsp)           # arg5
2139         mov     %r11,40(%rsp)           # arg6
2140         mov     %r12,48(%rsp)           # arg7
2141         mov     %rcx,56(%rsp)           # arg8, (NULL)
2142         call    *__imp_RtlVirtualUnwind(%rip)
2143
2144         mov     \$1,%eax                # ExceptionContinueSearch
2145         add     \$64,%rsp
2146         popfq
2147         pop     %r15
2148         pop     %r14
2149         pop     %r13
2150         pop     %r12
2151         pop     %rbp
2152         pop     %rbx
2153         pop     %rdi
2154         pop     %rsi
2155         ret
2156 .size   avx_handler,.-avx_handler
2157
2158 .section        .pdata
2159 .align  4
2160         .rva    .LSEH_begin_poly1305_init
2161         .rva    .LSEH_end_poly1305_init
2162         .rva    .LSEH_info_poly1305_init
2163
2164         .rva    .LSEH_begin_poly1305_blocks
2165         .rva    .LSEH_end_poly1305_blocks
2166         .rva    .LSEH_info_poly1305_blocks
2167
2168         .rva    .LSEH_begin_poly1305_emit
2169         .rva    .LSEH_end_poly1305_emit
2170         .rva    .LSEH_info_poly1305_emit
2171 ___
2172 $code.=<<___ if ($avx);
2173         .rva    .LSEH_begin_poly1305_blocks_avx
2174         .rva    .Lbase2_64_avx
2175         .rva    .LSEH_info_poly1305_blocks_avx_1
2176
2177         .rva    .Lbase2_64_avx
2178         .rva    .Leven_avx
2179         .rva    .LSEH_info_poly1305_blocks_avx_2
2180
2181         .rva    .Leven_avx
2182         .rva    .LSEH_end_poly1305_blocks_avx
2183         .rva    .LSEH_info_poly1305_blocks_avx_3
2184
2185         .rva    .LSEH_begin_poly1305_emit_avx
2186         .rva    .LSEH_end_poly1305_emit_avx
2187         .rva    .LSEH_info_poly1305_emit_avx
2188 ___
2189 $code.=<<___ if ($avx>1);
2190         .rva    .LSEH_begin_poly1305_blocks_avx2
2191         .rva    .Lbase2_64_avx2
2192         .rva    .LSEH_info_poly1305_blocks_avx2_1
2193
2194         .rva    .Lbase2_64_avx2
2195         .rva    .Leven_avx2
2196         .rva    .LSEH_info_poly1305_blocks_avx2_2
2197
2198         .rva    .Leven_avx2
2199         .rva    .LSEH_end_poly1305_blocks_avx2
2200         .rva    .LSEH_info_poly1305_blocks_avx2_3
2201 ___
2202 $code.=<<___;
2203 .section        .xdata
2204 .align  8
2205 .LSEH_info_poly1305_init:
2206         .byte   9,0,0,0
2207         .rva    se_handler
2208         .rva    .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
2209
2210 .LSEH_info_poly1305_blocks:
2211         .byte   9,0,0,0
2212         .rva    se_handler
2213         .rva    .Lblocks_body,.Lblocks_epilogue
2214
2215 .LSEH_info_poly1305_emit:
2216         .byte   9,0,0,0
2217         .rva    se_handler
2218         .rva    .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
2219 ___
2220 $code.=<<___ if ($avx);
2221 .LSEH_info_poly1305_blocks_avx_1:
2222         .byte   9,0,0,0
2223         .rva    se_handler
2224         .rva    .Lblocks_avx_body,.Lblocks_avx_epilogue         # HandlerData[]
2225
2226 .LSEH_info_poly1305_blocks_avx_2:
2227         .byte   9,0,0,0
2228         .rva    se_handler
2229         .rva    .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue     # HandlerData[]
2230
2231 .LSEH_info_poly1305_blocks_avx_3:
2232         .byte   9,0,0,0
2233         .rva    avx_handler
2234         .rva    .Ldo_avx_body,.Ldo_avx_epilogue                 # HandlerData[]
2235
2236 .LSEH_info_poly1305_emit_avx:
2237         .byte   9,0,0,0
2238         .rva    se_handler
2239         .rva    .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
2240 ___
2241 $code.=<<___ if ($avx>1);
2242 .LSEH_info_poly1305_blocks_avx2_1:
2243         .byte   9,0,0,0
2244         .rva    se_handler
2245         .rva    .Lblocks_avx2_body,.Lblocks_avx2_epilogue       # HandlerData[]
2246
2247 .LSEH_info_poly1305_blocks_avx2_2:
2248         .byte   9,0,0,0
2249         .rva    se_handler
2250         .rva    .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue   # HandlerData[]
2251
2252 .LSEH_info_poly1305_blocks_avx2_3:
2253         .byte   9,0,0,0
2254         .rva    avx_handler
2255         .rva    .Ldo_avx2_body,.Ldo_avx2_epilogue               # HandlerData[]
2256 ___
2257 }
2258
2259 foreach (split('\n',$code)) {
2260         s/\`([^\`]*)\`/eval($1)/ge;     # evaluate offset expressions in backticks
2261         s/%r([a-z]+)#d/%e$1/g;          # e.g. %rax#d -> %eax
2262         s/%r([0-9]+)#d/%r$1d/g;         # e.g. %r8#d -> %r8d
2263         s/%x#%y/%x/g;                   # %x#%ymmN -> %xmmN
2264
2265         print $_,"\n";
2266 }
2267 close STDOUT;