x86[_64] assembly pack: add ChaCha20 and Poly1305 modules.
[openssl.git] / crypto / poly1305 / asm / poly1305-x86_64.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # This module implements Poly1305 hash for x86_64.
11 #
12 # March 2015
13 #
14 # Numbers are cycles per processed byte with poly1305_blocks alone,
15 # measured with rdtsc at fixed clock frequency.
16 #
17 #               IALU/gcc-4.8(*) AVX(**)         AVX2
18 # P4            4.90/+120%      -
19 # Core 2        2.39/+90%       -
20 # Westmere      1.86/+120%      -
21 # Sandy Bridge  1.39/+140%      1.10
22 # Haswell       1.10/+175%      1.11            0.65
23 # Skylake       1.12/+120%      0.96            0.51
24 # Silvermont    2.83/+95%       -
25 # VIA Nano      1.82/+150%      -
26 # Sledgehammer  1.38/+160%      -
27 # Bulldozer     2.21/+130%      0.97
28 #
29 # (*)   improvement coefficients relative to clang are more modest and
30 #       are ~50% on most processors; in both cases we are comparing to
31 #       __int128 code;
32 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
33 #       it was faster than integer-only code only on older Intel P4 and Core
34 #       processors, by 30-50% (the newer the processor, the smaller the gain),
35 #       while being slower on contemporary ones, e.g. almost 2x slower on
36 #       Atom; as the former are naturally disappearing, SSE2 is deemed unnecessary;
37
38 $flavour = shift;
39 $output  = shift;
40 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
41
42 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
43
44 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
45 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
46 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
47 die "can't locate x86_64-xlate.pl";
48
49 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
50                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
51         $avx = ($1>=2.19) + ($1>=2.22);
52 }
53
54 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
55            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
56         $avx = ($1>=2.09) + ($1>=2.10);
57 }
58
59 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
60            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
61         $avx = ($1>=10) + ($1>=12);
62 }
63
64 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
65         $avx = ($2>=3.0) + ($2>3.0);
66 }
67
68 open OUT,"| \"$^X\" $xlate $flavour $output";
69 *STDOUT=*OUT;
70
71 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
72 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
73 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
74 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
75
76 sub poly1305_iteration {
77 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
78 # output:       $h0-$h2 *= $r0-$r1
79 $code.=<<___;
80         mulq    $h0                     # h0*r1
81         mov     %rax,$d2
82          mov    $r0,%rax
83         mov     %rdx,$d3
84
85         mulq    $h0                     # h0*r0
86         mov     %rax,$h0                # future $h0
87          mov    $r0,%rax
88         mov     %rdx,$d1
89
90         mulq    $h1                     # h1*r0
91         add     %rax,$d2
92          mov    $s1,%rax
93         adc     %rdx,$d3
94
95         mulq    $h1                     # h1*s1
96          mov    $h2,$h1                 # borrow $h1
97         add     %rax,$h0
98         adc     %rdx,$d1
99
100         imulq   $s1,$h1                 # h2*s1
101         add     $h1,$d2
102          mov    $d1,$h1
103         adc     \$0,$d3
104
105         imulq   $r0,$h2                 # h2*r0
106         add     $d2,$h1
107         mov     \$-4,%rax               # mask value
108         adc     $h2,$d3
109
110         and     $d3,%rax                # last reduction step
111         mov     $d3,$h2
112         shr     \$2,$d3
113         and     \$3,$h2
114         add     $d3,%rax
115         add     %rax,$h0
116         adc     \$0,$h1
117 ___
118 }
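#
# For reference, a rough C equivalent of the step above (a sketch assuming a
# compiler with unsigned __int128; the name poly1305_mul is illustrative and
# not part of this module):
#
#	typedef unsigned long long u64;
#	typedef unsigned __int128 u128;
#
#	static void poly1305_mul(u64 h[3], u64 r0, u64 r1)
#	{
#	    u64 s1 = r1 + (r1 >> 2);    /* 2^128*r1 == s1 (mod 2^130-5) */
#	    u128 d0, d1;
#	    u64 d2, c;
#
#	    d0 = (u128)h[0]*r0 + (u128)h[1]*s1;
#	    d1 = (u128)h[0]*r1 + (u128)h[1]*r0 + (u128)h[2]*s1;
#	    d2 = h[2]*r0;               /* h[2] is only a few bits wide */
#
#	    h[0] = (u64)d0;  d1 += (u64)(d0 >> 64);
#	    h[1] = (u64)d1;  d2 += (u64)(d1 >> 64);
#
#	    /* last reduction step: fold bits at and above 2^130 back, times 5 */
#	    h[2] = d2 & 3;
#	    c    = (d2 & ~(u64)3) + (d2 >> 2);  /* == 5*(d2 >> 2) */
#	    h[0] += c;
#	    h[1] += (h[0] < c);         /* carry; not propagated further, as above */
#	}
#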
119
120 ########################################################################
121 # The layout of the opaque area is as follows.
122 #
123 #       unsigned __int64 h[3];          # current hash value base 2^64
124 #       unsigned __int64 r[2];          # key value base 2^64
125
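#
# In C terms (an illustrative declaration only; the type name is ours, not
# an OpenSSL one), matching the 0/8/16 and 24/32 byte offsets used below:
#
#	typedef struct {
#	    unsigned long long h[3];    /* current hash value, base 2^64 */
#	    unsigned long long r[2];    /* clamped key, base 2^64        */
#	} poly1305_ctx_base2_64;
#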
126 $code.=<<___;
127 .text
128
129 .extern OPENSSL_ia32cap_P
130
131 .globl  poly1305_init
132 .type   poly1305_init,\@function,2
133 .align  32
134 poly1305_init:
135         xor     %rax,%rax
136         mov     %rax,0($ctx)            # initialize hash value
137         mov     %rax,8($ctx)
138         mov     %rax,16($ctx)
139
140         cmp     \$0,$inp
141         je      .Lno_key
142
143         lea     poly1305_blocks(%rip),%r10
144         lea     poly1305_emit(%rip),%r11
145 ___
146 $code.=<<___    if ($avx);
147         mov     OPENSSL_ia32cap_P+4(%rip),%r9
148         lea     poly1305_blocks_avx(%rip),%rax
149         lea     poly1305_emit_avx(%rip),%rcx
150         bt      \$`60-32`,%r9           # AVX?
151         cmovc   %rax,%r10
152         cmovc   %rcx,%r11
153 ___
154 $code.=<<___    if ($avx>1);
155         lea     poly1305_blocks_avx2(%rip),%rax
156         bt      \$`5+32`,%r9            # AVX2?
157         cmovc   %rax,%r10
158 ___
159 $code.=<<___;
160         mov     \$0x0ffffffc0fffffff,%rax
161         mov     \$0x0ffffffc0ffffffc,%rcx
162         and     0($inp),%rax
163         and     8($inp),%rcx
164         mov     %rax,24($ctx)
165         mov     %rcx,32($ctx)
166
167         mov     %r10,0(%rdx)
168         mov     %r11,8(%rdx)
169
170         mov     \$1,%eax
171 .Lno_key:
172         ret
173 .size   poly1305_init,.-poly1305_init
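#
# The two mask constants above implement the standard Poly1305 key clamp,
# r &= 0x0ffffffc0ffffffc0ffffffc0fffffff, applied to the two little-endian
# 64-bit halves of the key.  A minimal C sketch (illustration only, not this
# module's interface; memcpy as in <string.h>):
#
#	static void poly1305_clamp(unsigned long long r[2],
#	                           const unsigned char key[16])
#	{
#	    memcpy(r, key, 16);                 /* x86_64 is little-endian */
#	    r[0] &= 0x0ffffffc0fffffffULL;
#	    r[1] &= 0x0ffffffc0ffffffcULL;
#	}
#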
174
175 .globl  poly1305_blocks
176 .type   poly1305_blocks,\@function,4
177 .align  32
178 poly1305_blocks:
179         sub     \$16,$len               # too short?
180         jc      .Lno_data
181
182         push    %rbx
183         push    %rbp
184         push    %r12
185         push    %r13
186         push    %r14
187         push    %r15
188 .Lblocks_body:
189
190         mov     $len,%r15               # reassign $len
191
192         mov     24($ctx),$r0            # load r
193         mov     32($ctx),$s1
194
195         mov     0($ctx),$h0             # load hash value
196         mov     8($ctx),$h1
197         mov     16($ctx),$h2
198
199         mov     $s1,$r1
200         shr     \$2,$s1
201         mov     $r1,%rax
202         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
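        # Why s1 works: the clamped r1 is a multiple of 4 and below 2^60, so
        #
        #   2^128*r1 = 2^130*(r1/4) == 5*(r1/4) = r1 + (r1>>2) = s1  (mod 2^130-5)
        #
        # which is why poly1305_iteration multiplies the h1 and h2
        # contributions that land at or above 2^128 by s1 rather than by r1.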
203         jmp     .Loop
204
205 .align  32
206 .Loop:
207         add     0($inp),$h0             # accumulate input
208         adc     8($inp),$h1
209         lea     16($inp),$inp
210         adc     $padbit,$h2
211 ___
212         &poly1305_iteration();
213 $code.=<<___;
214         mov     $r1,%rax
215         sub     \$16,%r15               # len-=16
216         jnc     .Loop
217
218         mov     $h0,0($ctx)             # store hash value
219         mov     $h1,8($ctx)
220         mov     $h2,16($ctx)
221
222         mov     0(%rsp),%r15
223         mov     8(%rsp),%r14
224         mov     16(%rsp),%r13
225         mov     24(%rsp),%r12
226         mov     32(%rsp),%rbp
227         mov     40(%rsp),%rbx
228         lea     48(%rsp),%rsp
229 .Lno_data:
230 .Lblocks_epilogue:
231         ret
232 .size   poly1305_blocks,.-poly1305_blocks
233
234 .globl  poly1305_emit
235 .type   poly1305_emit,\@function,3
236 .align  32
237 poly1305_emit:
238         mov     0($ctx),%r8     # load hash value
239         mov     8($ctx),%r9
240         mov     16($ctx),%r10
241
242         mov     %r8,%rax
243         add     \$5,%r8         # compare to modulus
244         mov     %r9,%rcx
245         adc     \$0,%r9
246         adc     \$0,%r10
247         shr     \$2,%r10        # did 130-bit value overflow?
248         cmovnz  %r8,%rax
249         cmovnz  %r9,%rcx
250
251         add     0($nonce),%rax  # accumulate nonce
252         adc     8($nonce),%rcx
253         mov     %rax,0($mac)    # write result
254         mov     %rcx,8($mac)
255
256         ret
257 .size   poly1305_emit,.-poly1305_emit
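#
# A C sketch of the tail computation in poly1305_emit (illustrative names
# only; memcpy as in <string.h>): the conditional selects mirror the two
# cmovnz instructions, i.e. h is replaced by h+5 whenever h+5 reaches 2^130,
# which for the partially reduced h produced by poly1305_blocks is the same
# as reducing h modulo 2^130-5 before the 128-bit nonce is added:
#
#	typedef unsigned long long u64;
#
#	static void poly1305_emit_ref(const u64 h[3], unsigned char mac[16],
#	                              const u64 nonce[2])
#	{
#	    u64 g0 = h[0] + 5;                  /* compare to modulus */
#	    u64 g1 = h[1] + (g0 < 5);
#	    u64 g2 = h[2] + (g1 < h[1]);
#	    u64 t0 = (g2 >> 2) ? g0 : h[0];     /* cmovnz */
#	    u64 t1 = (g2 >> 2) ? g1 : h[1];
#
#	    t0 += nonce[0];                     /* accumulate nonce */
#	    t1 += nonce[1] + (t0 < nonce[0]);
#	    memcpy(mac,     &t0, 8);            /* little-endian store */
#	    memcpy(mac + 8, &t1, 8);
#	}
#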
258 ___
259 if ($avx) {
260
261 ########################################################################
262 # The layout of the opaque area is as follows.
263 #
264 #       unsigned __int32 h[5];          # current hash value base 2^26
265 #       unsigned __int32 is_base2_26;
266 #       unsigned __int64 r[2];          # key value base 2^64
267 #       unsigned __int64 pad;
268 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
269 #
270 # where r^n are the base 2^26 digits of the powers of the multiplier key.
271 # There are 5 digits per power, but the last four are interleaved with their
272 # multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
273
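#
# In C terms the same area can be viewed as below (an illustrative
# declaration; the type and field names are ours).  Each of the 9 table rows
# is 16 bytes and holds one digit of r^2, r^1, r^4 and r^3 in its four
# 32-bit lanes, which is what the 16*n+lane-64 addressing in
# __poly1305_init_avx fills in:
#
#	typedef struct {
#	    unsigned int       h[5];            /* hash, five 26-bit digits  */
#	    unsigned int       is_base2_26;     /* radix h is currently in   */
#	    unsigned long long r[2];            /* clamped key, base 2^64    */
#	    unsigned long long pad;
#	    struct { unsigned int lane[4]; } table[9];
#	                                /* rows:  r0, r1, 5*r1, r2, 5*r2,
#	                                          r3, 5*r3, r4, 5*r4;
#	                                   lanes: r^2, r^1, r^4, r^3         */
#	} poly1305_ctx_avx;
#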
274 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
275     map("%xmm$_",(0..15));
276
277 $code.=<<___;
278 .type   __poly1305_block,\@abi-omnipotent
279 .align  32
280 __poly1305_block:
281 ___
282         &poly1305_iteration();
283 $code.=<<___;
284         ret
285 .size   __poly1305_block,.-__poly1305_block
286
287 .type   __poly1305_init_avx,\@abi-omnipotent
288 .align  32
289 __poly1305_init_avx:
290         mov     $r0,$h0
291         mov     $r1,$h1
292         xor     $h2,$h2
293
294         lea     48+64($ctx),$ctx        # size optimization
295
296         mov     $r1,%rax
297         call    __poly1305_block        # r^2
298
299         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
300         mov     \$0x3ffffff,%edx
301         mov     $h0,$d1
302         and     $h0#d,%eax
303         mov     $r0,$d2
304         and     $r0#d,%edx
305         mov     %eax,`16*0+0-64`($ctx)
306         shr     \$26,$d1
307         mov     %edx,`16*0+4-64`($ctx)
308         shr     \$26,$d2
309
310         mov     \$0x3ffffff,%eax
311         mov     \$0x3ffffff,%edx
312         and     $d1#d,%eax
313         and     $d2#d,%edx
314         mov     %eax,`16*1+0-64`($ctx)
315         lea     (%rax,%rax,4),%eax      # *5
316         mov     %edx,`16*1+4-64`($ctx)
317         lea     (%rdx,%rdx,4),%edx      # *5
318         mov     %eax,`16*2+0-64`($ctx)
319         shr     \$26,$d1
320         mov     %edx,`16*2+4-64`($ctx)
321         shr     \$26,$d2
322
323         mov     $h1,%rax
324         mov     $r1,%rdx
325         shl     \$12,%rax
326         shl     \$12,%rdx
327         or      $d1,%rax
328         or      $d2,%rdx
329         and     \$0x3ffffff,%eax
330         and     \$0x3ffffff,%edx
331         mov     %eax,`16*3+0-64`($ctx)
332         lea     (%rax,%rax,4),%eax      # *5
333         mov     %edx,`16*3+4-64`($ctx)
334         lea     (%rdx,%rdx,4),%edx      # *5
335         mov     %eax,`16*4+0-64`($ctx)
336         mov     $h1,$d1
337         mov     %edx,`16*4+4-64`($ctx)
338         mov     $r1,$d2
339
340         mov     \$0x3ffffff,%eax
341         mov     \$0x3ffffff,%edx
342         shr     \$14,$d1
343         shr     \$14,$d2
344         and     $d1#d,%eax
345         and     $d2#d,%edx
346         mov     %eax,`16*5+0-64`($ctx)
347         lea     (%rax,%rax,4),%eax      # *5
348         mov     %edx,`16*5+4-64`($ctx)
349         lea     (%rdx,%rdx,4),%edx      # *5
350         mov     %eax,`16*6+0-64`($ctx)
351         shr     \$26,$d1
352         mov     %edx,`16*6+4-64`($ctx)
353         shr     \$26,$d2
354
355         mov     $h2,%rax
356         shl     \$24,%rax
357         or      %rax,$d1
358         mov     $d1#d,`16*7+0-64`($ctx)
359         lea     ($d1,$d1,4),$d1         # *5
360         mov     $d2#d,`16*7+4-64`($ctx)
361         lea     ($d2,$d2,4),$d2         # *5
362         mov     $d1#d,`16*8+0-64`($ctx)
363         mov     $d2#d,`16*8+4-64`($ctx)
364
365         mov     $r1,%rax
366         call    __poly1305_block        # r^3
367
368         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
369         mov     $h0,$d1
370         and     $h0#d,%eax
371         shr     \$26,$d1
372         mov     %eax,`16*0+12-64`($ctx)
373
374         mov     \$0x3ffffff,%edx
375         and     $d1#d,%edx
376         mov     %edx,`16*1+12-64`($ctx)
377         lea     (%rdx,%rdx,4),%edx      # *5
378         shr     \$26,$d1
379         mov     %edx,`16*2+12-64`($ctx)
380
381         mov     $h1,%rax
382         shl     \$12,%rax
383         or      $d1,%rax
384         and     \$0x3ffffff,%eax
385         mov     %eax,`16*3+12-64`($ctx)
386         lea     (%rax,%rax,4),%eax      # *5
387         mov     $h1,$d1
388         mov     %eax,`16*4+12-64`($ctx)
389
390         mov     \$0x3ffffff,%edx
391         shr     \$14,$d1
392         and     $d1#d,%edx
393         mov     %edx,`16*5+12-64`($ctx)
394         lea     (%rdx,%rdx,4),%edx      # *5
395         shr     \$26,$d1
396         mov     %edx,`16*6+12-64`($ctx)
397
398         mov     $h2,%rax
399         shl     \$24,%rax
400         or      %rax,$d1
401         mov     $d1#d,`16*7+12-64`($ctx)
402         lea     ($d1,$d1,4),$d1         # *5
403         mov     $d1#d,`16*8+12-64`($ctx)
404
405         mov     $r1,%rax
406         call    __poly1305_block        # r^4
407
408         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
409         mov     $h0,$d1
410         and     $h0#d,%eax
411         shr     \$26,$d1
412         mov     %eax,`16*0+8-64`($ctx)
413
414         mov     \$0x3ffffff,%edx
415         and     $d1#d,%edx
416         mov     %edx,`16*1+8-64`($ctx)
417         lea     (%rdx,%rdx,4),%edx      # *5
418         shr     \$26,$d1
419         mov     %edx,`16*2+8-64`($ctx)
420
421         mov     $h1,%rax
422         shl     \$12,%rax
423         or      $d1,%rax
424         and     \$0x3ffffff,%eax
425         mov     %eax,`16*3+8-64`($ctx)
426         lea     (%rax,%rax,4),%eax      # *5
427         mov     $h1,$d1
428         mov     %eax,`16*4+8-64`($ctx)
429
430         mov     \$0x3ffffff,%edx
431         shr     \$14,$d1
432         and     $d1#d,%edx
433         mov     %edx,`16*5+8-64`($ctx)
434         lea     (%rdx,%rdx,4),%edx      # *5
435         shr     \$26,$d1
436         mov     %edx,`16*6+8-64`($ctx)
437
438         mov     $h2,%rax
439         shl     \$24,%rax
440         or      %rax,$d1
441         mov     $d1#d,`16*7+8-64`($ctx)
442         lea     ($d1,$d1,4),$d1         # *5
443         mov     $d1#d,`16*8+8-64`($ctx)
444
445         lea     -48-64($ctx),$ctx       # size [de-]optimization
446         ret
447 .size   __poly1305_init_avx,.-__poly1305_init_avx
448
449 .type   poly1305_blocks_avx,\@function,4
450 .align  32
451 poly1305_blocks_avx:
452         mov     20($ctx),%r8d           # is_base2_26
453         cmp     \$128,$len
454         jae     .Lblocks_avx
455         test    %r8d,%r8d
456         jz      poly1305_blocks
457
458 .Lblocks_avx:
459         and     \$-16,$len
460         jz      .Lno_data_avx
461
462         vzeroupper
463
464         test    %r8d,%r8d
465         jz      .Lbase2_64_avx
466
467         test    \$31,$len
468         jz      .Leven_avx
469
470         push    %rbx
471         push    %rbp
472         push    %r12
473         push    %r13
474         push    %r14
475         push    %r15
476 .Lblocks_avx_body:
477
478         mov     $len,%r15               # reassign $len
479
480         mov     0($ctx),$d1             # load hash value
481         mov     8($ctx),$d2
482         mov     16($ctx),$h2#d
483
484         mov     24($ctx),$r0            # load r
485         mov     32($ctx),$s1
486
487         ################################# base 2^26 -> base 2^64
488         mov     $d1#d,$h0#d
489         and     \$-1<<31,$d1
490         mov     $d2,$r1                 # borrow $r1
491         mov     $d2#d,$h1#d
492         and     \$-1<<31,$d2
493
494         shr     \$6,$d1
495         shl     \$52,$r1
496         add     $d1,$h0
497         shr     \$12,$h1
498         shr     \$18,$d2
499         add     $r1,$h0
500         adc     $d2,$h1
501
502         mov     $h2,$d1
503         shl     \$40,$d1
504         shr     \$24,$h2
505         add     $d1,$h1
506         adc     \$0,$h2                 # can be partially reduced...
507
508         mov     \$-4,$d2                # ... so reduce
509         mov     $h2,$d1
510         and     $h2,$d2
511         shr     \$2,$d1
512         and     \$3,$h2
513         add     $d2,$d1                 # =*5
514         add     $d1,$h0
515         adc     \$0,$h1
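        # In C the gathering above amounts to the following (a sketch of the
        # same mapping, not of the exact add/adc sequence; d[0..4] are the
        # five stored 26-bit digits, u64/u128 unsigned 64/128-bit integers):
        #
        #       t  = (u128)d[0] + ((u128)d[1]<<26) + ((u128)d[2]<<52);
        #       h0 = (u64)t;
        #       t  = (t>>64) + ((u128)d[3]<<14) + ((u128)d[4]<<40);
        #       h1 = (u64)t;
        #       h2 = (u64)(t>>64);
        #       /* digits may be only partially reduced, so fold the excess
        #        * at and above 2^130 back in, multiplied by 5 */
        #       c  = (h2 & ~(u64)3) + (h2>>2);
        #       h2 &= 3;
        #       h0 += c;
        #       h1 += (h0 < c);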
516
517         mov     $s1,$r1
518         mov     $s1,%rax
519         shr     \$2,$s1
520         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
521
522         add     0($inp),$h0             # accumulate input
523         adc     8($inp),$h1
524         lea     16($inp),$inp
525         adc     $padbit,$h2
526
527         call    __poly1305_block
528
529         test    $padbit,$padbit         # if $padbit is zero,
530         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
531
532         ################################# base 2^64 -> base 2^26
533         mov     $h0,%rax
534         mov     $h0,%rdx
535         shr     \$52,$h0
536         mov     $h1,$r0
537         mov     $h1,$r1
538         shr     \$26,%rdx
539         and     \$0x3ffffff,%rax        # h[0]
540         shl     \$12,$r0
541         and     \$0x3ffffff,%rdx        # h[1]
542         shr     \$14,$h1
543         or      $r0,$h0
544         shl     \$24,$h2
545         and     \$0x3ffffff,$h0         # h[2]
546         shr     \$40,$r1
547         and     \$0x3ffffff,$h1         # h[3]
548         or      $r1,$h2                 # h[4]
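        # The splat above is the inverse mapping, five 26-bit digits out of
        # the 2x64+2-bit value h0:h1:h2 (a C sketch of the same bit layout):
        #
        #       d[0] =  h0        & 0x3ffffff;
        #       d[1] = (h0 >> 26) & 0x3ffffff;
        #       d[2] = ((h0 >> 52) | (h1 << 12)) & 0x3ffffff;
        #       d[3] = (h1 >> 14) & 0x3ffffff;
        #       d[4] = (h1 >> 40) | (h2 << 24);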
549
550         sub     \$16,%r15
551         jz      .Lstore_base2_26_avx
552
553         vmovd   %rax#d,$H0
554         vmovd   %rdx#d,$H1
555         vmovd   $h0#d,$H2
556         vmovd   $h1#d,$H3
557         vmovd   $h2#d,$H4
558         jmp     .Lproceed_avx
559
560 .align  32
561 .Lstore_base2_64_avx:
562         mov     $h0,0($ctx)
563         mov     $h1,8($ctx)
564         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
565         jmp     .Ldone_avx
566
567 .align  16
568 .Lstore_base2_26_avx:
569         mov     %rax#d,0($ctx)          # store hash value base 2^26
570         mov     %rdx#d,4($ctx)
571         mov     $h0#d,8($ctx)
572         mov     $h1#d,12($ctx)
573         mov     $h2#d,16($ctx)
574 .align  16
575 .Ldone_avx:
576         mov     0(%rsp),%r15
577         mov     8(%rsp),%r14
578         mov     16(%rsp),%r13
579         mov     24(%rsp),%r12
580         mov     32(%rsp),%rbp
581         mov     40(%rsp),%rbx
582         lea     48(%rsp),%rsp
583 .Lno_data_avx:
584 .Lblocks_avx_epilogue:
585         ret
586
587 .align  32
588 .Lbase2_64_avx:
589         push    %rbx
590         push    %rbp
591         push    %r12
592         push    %r13
593         push    %r14
594         push    %r15
595 .Lbase2_64_avx_body:
596
597         mov     $len,%r15               # reassign $len
598
599         mov     24($ctx),$r0            # load r
600         mov     32($ctx),$s1
601
602         mov     0($ctx),$h0             # load hash value
603         mov     8($ctx),$h1
604         mov     16($ctx),$h2#d
605
606         mov     $s1,$r1
607         mov     $s1,%rax
608         shr     \$2,$s1
609         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
610
611         test    \$31,$len
612         jz      .Linit_avx
613
614         add     0($inp),$h0             # accumulate input
615         adc     8($inp),$h1
616         lea     16($inp),$inp
617         adc     $padbit,$h2
618         sub     \$16,%r15
619
620         call    __poly1305_block
621
622 .Linit_avx:
623         ################################# base 2^64 -> base 2^26
624         mov     $h0,%rax
625         mov     $h0,%rdx
626         shr     \$52,$h0
627         mov     $h1,$d1
628         mov     $h1,$d2
629         shr     \$26,%rdx
630         and     \$0x3ffffff,%rax        # h[0]
631         shl     \$12,$d1
632         and     \$0x3ffffff,%rdx        # h[1]
633         shr     \$14,$h1
634         or      $d1,$h0
635         shl     \$24,$h2
636         and     \$0x3ffffff,$h0         # h[2]
637         shr     \$40,$d2
638         and     \$0x3ffffff,$h1         # h[3]
639         or      $d2,$h2                 # h[4]
640
641         vmovd   %rax#d,$H0
642         vmovd   %rdx#d,$H1
643         vmovd   $h0#d,$H2
644         vmovd   $h1#d,$H3
645         vmovd   $h2#d,$H4
646         movl    \$1,20($ctx)            # set is_base2_26
647
648         call    __poly1305_init_avx
649
650 .Lproceed_avx:
651         mov     %r15,$len
652
653         mov     0(%rsp),%r15
654         mov     8(%rsp),%r14
655         mov     16(%rsp),%r13
656         mov     24(%rsp),%r12
657         mov     32(%rsp),%rbp
658         mov     40(%rsp),%rbx
659         lea     48(%rsp),%rax
660         lea     48(%rsp),%rsp
661 .Lbase2_64_avx_epilogue:
662         jmp     .Ldo_avx
663
664 .align  32
665 .Leven_avx:
666         vmovd           4*0($ctx),$H0           # load hash value
667         vmovd           4*1($ctx),$H1
668         vmovd           4*2($ctx),$H2
669         vmovd           4*3($ctx),$H3
670         vmovd           4*4($ctx),$H4
671
672 .Ldo_avx:
673 ___
674 $code.=<<___    if (!$win64);
675         lea             -0x58(%rsp),%r11
676         sub             \$0x178,%rsp
677 ___
678 $code.=<<___    if ($win64);
679         lea             -0xf8(%rsp),%r11
680         sub             \$0x218,%rsp
681         vmovdqa         %xmm6,0x50(%r11)
682         vmovdqa         %xmm7,0x60(%r11)
683         vmovdqa         %xmm8,0x70(%r11)
684         vmovdqa         %xmm9,0x80(%r11)
685         vmovdqa         %xmm10,0x90(%r11)
686         vmovdqa         %xmm11,0xa0(%r11)
687         vmovdqa         %xmm12,0xb0(%r11)
688         vmovdqa         %xmm13,0xc0(%r11)
689         vmovdqa         %xmm14,0xd0(%r11)
690         vmovdqa         %xmm15,0xe0(%r11)
691 .Ldo_avx_body:
692 ___
693 $code.=<<___;
694         sub             \$64,$len
695         lea             -32($inp),%rax
696         cmovc           %rax,$inp
697
698         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
699         lea             `16*3+64`($ctx),$ctx    # size optimization
700         lea             .Lconst(%rip),%rcx
701
702         ################################################################
703         # load input
704         vmovdqu         16*2($inp),$T0
705         vmovdqu         16*3($inp),$T1
706         vmovdqa         64(%rcx),$MASK          # .Lmask26
707
708         vpsrldq         \$6,$T0,$T2             # splat input
709         vpsrldq         \$6,$T1,$T3
710         vpunpckhqdq     $T1,$T0,$T4             # 4
711         vpunpcklqdq     $T1,$T0,$T0             # 0:1
712         vpunpcklqdq     $T3,$T2,$T3             # 2:3
713
714         vpsrlq          \$40,$T4,$T4            # 4
715         vpsrlq          \$26,$T0,$T1
716         vpand           $MASK,$T0,$T0           # 0
717         vpsrlq          \$4,$T3,$T2
718         vpand           $MASK,$T1,$T1           # 1
719         vpsrlq          \$30,$T3,$T3
720         vpand           $MASK,$T2,$T2           # 2
721         vpand           $MASK,$T3,$T3           # 3
722         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
723
724         jbe             .Lskip_loop_avx
725
726         # expand and copy pre-calculated table to stack
727         vmovdqu         `16*1-64`($ctx),$D1
728         vmovdqu         `16*2-64`($ctx),$D2
729         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
730         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
731         vmovdqa         $D3,-0x90(%r11)
732         vmovdqa         $D0,0x00(%rsp)
733         vpshufd         \$0xEE,$D1,$D4
734         vmovdqu         `16*3-64`($ctx),$D0
735         vpshufd         \$0x44,$D1,$D1
736         vmovdqa         $D4,-0x80(%r11)
737         vmovdqa         $D1,0x10(%rsp)
738         vpshufd         \$0xEE,$D2,$D3
739         vmovdqu         `16*4-64`($ctx),$D1
740         vpshufd         \$0x44,$D2,$D2
741         vmovdqa         $D3,-0x70(%r11)
742         vmovdqa         $D2,0x20(%rsp)
743         vpshufd         \$0xEE,$D0,$D4
744         vmovdqu         `16*5-64`($ctx),$D2
745         vpshufd         \$0x44,$D0,$D0
746         vmovdqa         $D4,-0x60(%r11)
747         vmovdqa         $D0,0x30(%rsp)
748         vpshufd         \$0xEE,$D1,$D3
749         vmovdqu         `16*6-64`($ctx),$D0
750         vpshufd         \$0x44,$D1,$D1
751         vmovdqa         $D3,-0x50(%r11)
752         vmovdqa         $D1,0x40(%rsp)
753         vpshufd         \$0xEE,$D2,$D4
754         vmovdqu         `16*7-64`($ctx),$D1
755         vpshufd         \$0x44,$D2,$D2
756         vmovdqa         $D4,-0x40(%r11)
757         vmovdqa         $D2,0x50(%rsp)
758         vpshufd         \$0xEE,$D0,$D3
759         vmovdqu         `16*8-64`($ctx),$D2
760         vpshufd         \$0x44,$D0,$D0
761         vmovdqa         $D3,-0x30(%r11)
762         vmovdqa         $D0,0x60(%rsp)
763         vpshufd         \$0xEE,$D1,$D4
764         vpshufd         \$0x44,$D1,$D1
765         vmovdqa         $D4,-0x20(%r11)
766         vmovdqa         $D1,0x70(%rsp)
767         vpshufd         \$0xEE,$D2,$D3
768          vmovdqa        0x00(%rsp),$D4          # preload r0^2
769         vpshufd         \$0x44,$D2,$D2
770         vmovdqa         $D3,-0x10(%r11)
771         vmovdqa         $D2,0x80(%rsp)
772
773         jmp             .Loop_avx
774
775 .align  32
776 .Loop_avx:
777         ################################################################
778         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
779         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
780         #   \___________________/
781         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
782         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
783         #   \___________________/ \____________________/
784         #
785         # Note that we start with inp[2:3]*r^2, because that product does
786         # not depend on the reduction performed in the previous iteration.
787         ################################################################
788         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
789         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
790         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
791         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
792         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
793         #
794         # though note that $Tx and $Hx are "reversed" in this section,
795         # and $D4 is preloaded with r0^2...
796
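        # To see why this works, write the hash of blocks m[1..n] as
        #
        #   H = m[1]*r^n + m[2]*r^(n-1) + ... + m[n]*r   (mod 2^130-5)
        #
        # and split it into even- and odd-indexed blocks.  Each half collects
        # every other power of r, i.e. a polynomial in r^2 times a final
        # factor of r^2 or r^1, so the two halves can be evaluated in
        # parallel vector lanes with Horner steps of r^2 (r^4 when four
        # blocks are absorbed per iteration) and folded together by the
        # r^2:r^1 (or r^4:r^3) multiplication performed after this loop.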
797         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
798         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
799           vmovdqa       $H2,0x20(%r11)                          # offload hash
800         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
801          vmovdqa        0x10(%rsp),$H2          # r1^2
802         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
803         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
804
805           vmovdqa       $H0,0x00(%r11)                          #
806         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
807           vmovdqa       $H1,0x10(%r11)                          #
808         vpmuludq        $T3,$H2,$H1             # h3*r1
809         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
810         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
811           vmovdqa       $H3,0x30(%r11)                          #
812         vpmuludq        $T2,$H2,$H0             # h2*r1
813         vpmuludq        $T1,$H2,$H1             # h1*r1
814         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
815          vmovdqa        0x30(%rsp),$H3          # r2^2
816         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
817           vmovdqa       $H4,0x40(%r11)                          #
818         vpmuludq        $T0,$H2,$H2             # h0*r1
819          vpmuludq       $T2,$H3,$H0             # h2*r2
820         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
821
822          vmovdqa        0x40(%rsp),$H4          # s2^2
823         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
824         vpmuludq        $T1,$H3,$H1             # h1*r2
825         vpmuludq        $T0,$H3,$H3             # h0*r2
826         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
827          vmovdqa        0x50(%rsp),$H2          # r3^2
828         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
829         vpmuludq        $T4,$H4,$H0             # h4*s2
830         vpmuludq        $T3,$H4,$H4             # h3*s2
831         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
832          vmovdqa        0x60(%rsp),$H3          # s3^2
833         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
834
835          vmovdqa        0x80(%rsp),$H4          # s4^2
836         vpmuludq        $T1,$H2,$H1             # h1*r3
837         vpmuludq        $T0,$H2,$H2             # h0*r3
838         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
839         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
840         vpmuludq        $T4,$H3,$H0             # h4*s3
841         vpmuludq        $T3,$H3,$H1             # h3*s3
842         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
843          vmovdqu        16*0($inp),$H0                          # load input
844         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
845         vpmuludq        $T2,$H3,$H3             # h2*s3
846          vpmuludq       $T2,$H4,$T2             # h2*s4
847         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
848
849          vmovdqu        16*1($inp),$H1                          #
850         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
851         vpmuludq        $T3,$H4,$T3             # h3*s4
852         vpmuludq        $T4,$H4,$T4             # h4*s4
853          vpsrldq        \$6,$H0,$H2                             # splat input
854         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
855         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
856          vpsrldq        \$6,$H1,$H3                             #
857         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
858         vpmuludq        $T1,$H4,$T0             # h1*s4
859          vpunpckhqdq    $H1,$H0,$H4             # 4
860         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
861          vmovdqa        -0x90(%r11),$T4         # r0^4
862         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
863
864         vpunpcklqdq     $H1,$H0,$H0             # 0:1
865         vpunpcklqdq     $H3,$H2,$H3             # 2:3
866
867         #vpsrlq         \$40,$H4,$H4            # 4
868         vpsrldq         \$`40/8`,$H4,$H4        # 4
869         vpsrlq          \$26,$H0,$H1
870         vpand           $MASK,$H0,$H0           # 0
871         vpsrlq          \$4,$H3,$H2
872         vpand           $MASK,$H1,$H1           # 1
873         vpand           0(%rcx),$H4,$H4         # .Lmask24
874         vpsrlq          \$30,$H3,$H3
875         vpand           $MASK,$H2,$H2           # 2
876         vpand           $MASK,$H3,$H3           # 3
877         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
878
879         vpaddq          0x00(%r11),$H0,$H0      # add hash value
880         vpaddq          0x10(%r11),$H1,$H1
881         vpaddq          0x20(%r11),$H2,$H2
882         vpaddq          0x30(%r11),$H3,$H3
883         vpaddq          0x40(%r11),$H4,$H4
884
885         lea             16*2($inp),%rax
886         lea             16*4($inp),$inp
887         sub             \$64,$len
888         cmovc           %rax,$inp
889
890         ################################################################
891         # Now we accumulate (inp[0:1]+hash)*r^4
892         ################################################################
893         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
894         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
895         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
896         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
897         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
898
899         vpmuludq        $H0,$T4,$T0             # h0*r0
900         vpmuludq        $H1,$T4,$T1             # h1*r0
901         vpaddq          $T0,$D0,$D0
902         vpaddq          $T1,$D1,$D1
903          vmovdqa        -0x80(%r11),$T2         # r1^4
904         vpmuludq        $H2,$T4,$T0             # h2*r0
905         vpmuludq        $H3,$T4,$T1             # h3*r0
906         vpaddq          $T0,$D2,$D2
907         vpaddq          $T1,$D3,$D3
908         vpmuludq        $H4,$T4,$T4             # h4*r0
909          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
910         vpaddq          $T4,$D4,$D4
911
912         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
913         vpmuludq        $H2,$T2,$T1             # h2*r1
914         vpmuludq        $H3,$T2,$T0             # h3*r1
915         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
916          vmovdqa        -0x60(%r11),$T3         # r2^4
917         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
918         vpmuludq        $H1,$T2,$T1             # h1*r1
919         vpmuludq        $H0,$T2,$T2             # h0*r1
920         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
921         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
922
923          vmovdqa        -0x50(%r11),$T4         # s2^4
924         vpmuludq        $H2,$T3,$T0             # h2*r2
925         vpmuludq        $H1,$T3,$T1             # h1*r2
926         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
927         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
928          vmovdqa        -0x40(%r11),$T2         # r3^4
929         vpmuludq        $H0,$T3,$T3             # h0*r2
930         vpmuludq        $H4,$T4,$T0             # h4*s2
931         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
932         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
933          vmovdqa        -0x30(%r11),$T3         # s3^4
934         vpmuludq        $H3,$T4,$T4             # h3*s2
935          vpmuludq       $H1,$T2,$T1             # h1*r3
936         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
937
938          vmovdqa        -0x10(%r11),$T4         # s4^4
939         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
940         vpmuludq        $H0,$T2,$T2             # h0*r3
941         vpmuludq        $H4,$T3,$T0             # h4*s3
942         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
943         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
944          vmovdqu        16*2($inp),$T0                          # load input
945         vpmuludq        $H3,$T3,$T2             # h3*s3
946         vpmuludq        $H2,$T3,$T3             # h2*s3
947         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
948          vmovdqu        16*3($inp),$T1                          #
949         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
950
951         vpmuludq        $H2,$T4,$H2             # h2*s4
952         vpmuludq        $H3,$T4,$H3             # h3*s4
953          vpsrldq        \$6,$T0,$T2                             # splat input
954         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
955         vpmuludq        $H4,$T4,$H4             # h4*s4
956          vpsrldq        \$6,$T1,$T3                             #
957         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
958         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
959         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
960         vpmuludq        $H1,$T4,$H0
961          vpunpckhqdq    $T1,$T0,$T4             # 4
962         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
963         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
964
965         vpunpcklqdq     $T1,$T0,$T0             # 0:1
966         vpunpcklqdq     $T3,$T2,$T3             # 2:3
967
968         #vpsrlq         \$40,$T4,$T4            # 4
969         vpsrldq         \$`40/8`,$T4,$T4        # 4
970         vpsrlq          \$26,$T0,$T1
971          vmovdqa        0x00(%rsp),$D4          # preload r0^2
972         vpand           $MASK,$T0,$T0           # 0
973         vpsrlq          \$4,$T3,$T2
974         vpand           $MASK,$T1,$T1           # 1
975         vpand           0(%rcx),$T4,$T4         # .Lmask24
976         vpsrlq          \$30,$T3,$T3
977         vpand           $MASK,$T2,$T2           # 2
978         vpand           $MASK,$T3,$T3           # 3
979         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
980
981         ################################################################
982         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
983         # and P. Schwabe
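        # In scalar terms the carry chain below performs the following, with
        # M = 0x3ffffff (a sketch of the same sequence on the five 26-bit
        # limbs h0..h4 of each lane):
        #
        #       c = h3>>26; h3 &= M; h4 += c;
        #       c = h0>>26; h0 &= M; h1 += c;
        #       c = h4>>26; h4 &= M; h0 += c*5;         /* 2^130 == 5 */
        #       c = h1>>26; h1 &= M; h2 += c;
        #       c = h2>>26; h2 &= M; h3 += c;
        #       c = h0>>26; h0 &= M; h1 += c;
        #       c = h3>>26; h3 &= M; h4 += c;
        #
        # leaving every limb only marginally above 26 bits, which is all the
        # 64-bit lanes of the products above require.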
984
985         vpsrlq          \$26,$H3,$D3
986         vpand           $MASK,$H3,$H3
987         vpaddq          $D3,$H4,$H4             # h3 -> h4
988
989         vpsrlq          \$26,$H0,$D0
990         vpand           $MASK,$H0,$H0
991         vpaddq          $D0,$D1,$H1             # h0 -> h1
992
993         vpsrlq          \$26,$H4,$D0
994         vpand           $MASK,$H4,$H4
995
996         vpsrlq          \$26,$H1,$D1
997         vpand           $MASK,$H1,$H1
998         vpaddq          $D1,$H2,$H2             # h1 -> h2
999
1000         vpaddq          $D0,$H0,$H0
1001         vpsllq          \$2,$D0,$D0
1002         vpaddq          $D0,$H0,$H0             # h4 -> h0
1003
1004         vpsrlq          \$26,$H2,$D2
1005         vpand           $MASK,$H2,$H2
1006         vpaddq          $D2,$H3,$H3             # h2 -> h3
1007
1008         vpsrlq          \$26,$H0,$D0
1009         vpand           $MASK,$H0,$H0
1010         vpaddq          $D0,$H1,$H1             # h0 -> h1
1011
1012         vpsrlq          \$26,$H3,$D3
1013         vpand           $MASK,$H3,$H3
1014         vpaddq          $D3,$H4,$H4             # h3 -> h4
1015
1016         ja              .Loop_avx
1017
1018 .Lskip_loop_avx:
1019         ################################################################
1020         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1021
1022         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1023         add             \$32,$len
1024         jnz             .Long_tail_avx
1025
1026         vpaddq          $H2,$T2,$T2
1027         vpaddq          $H0,$T0,$T0
1028         vpaddq          $H1,$T1,$T1
1029         vpaddq          $H3,$T3,$T3
1030         vpaddq          $H4,$T4,$T4
1031
1032 .Long_tail_avx:
1033         vmovdqa         $H2,0x20(%r11)
1034         vmovdqa         $H0,0x00(%r11)
1035         vmovdqa         $H1,0x10(%r11)
1036         vmovdqa         $H3,0x30(%r11)
1037         vmovdqa         $H4,0x40(%r11)
1038
1039         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1040         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1041         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1042         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1043         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1044
1045         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1046         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1047          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1048         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1049         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1050         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1051
1052         vpmuludq        $T3,$H2,$H0             # h3*r1
1053         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1054          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1055         vpmuludq        $T2,$H2,$H1             # h2*r1
1056         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1057          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1058         vpmuludq        $T1,$H2,$H0             # h1*r1
1059         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1060         vpmuludq        $T0,$H2,$H2             # h0*r1
1061         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1062         vpmuludq        $T4,$H3,$H3             # h4*s1
1063         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1064
1065          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1066         vpmuludq        $T2,$H4,$H1             # h2*r2
1067         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1068         vpmuludq        $T1,$H4,$H0             # h1*r2
1069         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1070          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1071         vpmuludq        $T0,$H4,$H4             # h0*r2
1072         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1073         vpmuludq        $T4,$H2,$H1             # h4*s2
1074         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1075          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1076         vpmuludq        $T3,$H2,$H2             # h3*s2
1077         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1078
1079         vpmuludq        $T1,$H3,$H0             # h1*r3
1080         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1081         vpmuludq        $T0,$H3,$H3             # h0*r3
1082         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1083          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1084         vpmuludq        $T4,$H4,$H1             # h4*s3
1085         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1086          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1087         vpmuludq        $T3,$H4,$H0             # h3*s3
1088         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1089         vpmuludq        $T2,$H4,$H4             # h2*s3
1090         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1091
1092         vpmuludq        $T0,$H2,$H2             # h0*r4
1093         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1094         vpmuludq        $T4,$H3,$H1             # h4*s4
1095         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1096         vpmuludq        $T3,$H3,$H0             # h3*s4
1097         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1098         vpmuludq        $T2,$H3,$H1             # h2*s4
1099         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1100         vpmuludq        $T1,$H3,$H3             # h1*s4
1101         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1102
1103         jz              .Lshort_tail_avx
1104
1105         vmovdqu         16*0($inp),$H0          # load input
1106         vmovdqu         16*1($inp),$H1
1107
1108         vpsrldq         \$6,$H0,$H2             # splat input
1109         vpsrldq         \$6,$H1,$H3
1110         vpunpckhqdq     $H1,$H0,$H4             # 4
1111         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1112         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1113
1114         vpsrlq          \$40,$H4,$H4            # 4
1115         vpsrlq          \$26,$H0,$H1
1116         vpand           $MASK,$H0,$H0           # 0
1117         vpsrlq          \$4,$H3,$H2
1118         vpand           $MASK,$H1,$H1           # 1
1119         vpsrlq          \$30,$H3,$H3
1120         vpand           $MASK,$H2,$H2           # 2
1121         vpand           $MASK,$H3,$H3           # 3
1122         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1123
1124         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1125         vpaddq          0x00(%r11),$H0,$H0
1126         vpaddq          0x10(%r11),$H1,$H1
1127         vpaddq          0x20(%r11),$H2,$H2
1128         vpaddq          0x30(%r11),$H3,$H3
1129         vpaddq          0x40(%r11),$H4,$H4
1130
1131         ################################################################
1132         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1133
1134         vpmuludq        $H0,$T4,$T0             # h0*r0
1135         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1136         vpmuludq        $H1,$T4,$T1             # h1*r0
1137         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1138         vpmuludq        $H2,$T4,$T0             # h2*r0
1139         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1140          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1141         vpmuludq        $H3,$T4,$T1             # h3*r0
1142         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1143         vpmuludq        $H4,$T4,$T4             # h4*r0
1144         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1145
1146         vpmuludq        $H3,$T2,$T0             # h3*r1
1147         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1148          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1149         vpmuludq        $H2,$T2,$T1             # h2*r1
1150         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1151          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1152         vpmuludq        $H1,$T2,$T0             # h1*r1
1153         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1154         vpmuludq        $H0,$T2,$T2             # h0*r1
1155         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1156         vpmuludq        $H4,$T3,$T3             # h4*s1
1157         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1158
1159          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1160         vpmuludq        $H2,$T4,$T1             # h2*r2
1161         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1162         vpmuludq        $H1,$T4,$T0             # h1*r2
1163         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1164          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1165         vpmuludq        $H0,$T4,$T4             # h0*r2
1166         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1167         vpmuludq        $H4,$T2,$T1             # h4*s2
1168         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1169          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1170         vpmuludq        $H3,$T2,$T2             # h3*s2
1171         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1172
1173         vpmuludq        $H1,$T3,$T0             # h1*r3
1174         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1175         vpmuludq        $H0,$T3,$T3             # h0*r3
1176         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1177          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1178         vpmuludq        $H4,$T4,$T1             # h4*s3
1179         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1180          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1181         vpmuludq        $H3,$T4,$T0             # h3*s3
1182         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1183         vpmuludq        $H2,$T4,$T4             # h2*s3
1184         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1185
1186         vpmuludq        $H0,$T2,$T2             # h0*r4
1187         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1188         vpmuludq        $H4,$T3,$T1             # h4*s4
1189         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1190         vpmuludq        $H3,$T3,$T0             # h3*s4
1191         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1192         vpmuludq        $H2,$T3,$T1             # h2*s4
1193         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1194         vpmuludq        $H1,$T3,$T3             # h1*s4
1195         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1196
1197 .Lshort_tail_avx:
1198         ################################################################
1199         # lazy reduction
1200
1201         vpsrlq          \$26,$D3,$H3
1202         vpand           $MASK,$D3,$D3
1203         vpaddq          $H3,$D4,$D4             # h3 -> h4
1204
1205         vpsrlq          \$26,$D0,$H0
1206         vpand           $MASK,$D0,$D0
1207         vpaddq          $H0,$D1,$D1             # h0 -> h1
1208
1209         vpsrlq          \$26,$D4,$H4
1210         vpand           $MASK,$D4,$D4
1211
1212         vpsrlq          \$26,$D1,$H1
1213         vpand           $MASK,$D1,$D1
1214         vpaddq          $H1,$D2,$D2             # h1 -> h2
1215
1216         vpaddq          $H4,$D0,$D0
1217         vpsllq          \$2,$H4,$H4
1218         vpaddq          $H4,$D0,$D0             # h4 -> h0
1219
1220         vpsrlq          \$26,$D2,$H2
1221         vpand           $MASK,$D2,$D2
1222         vpaddq          $H2,$D3,$D3             # h2 -> h3
1223
1224         vpsrlq          \$26,$D0,$H0
1225         vpand           $MASK,$D0,$D0
1226         vpaddq          $H0,$D1,$D1             # h0 -> h1
1227
1228         vpsrlq          \$26,$D3,$H3
1229         vpand           $MASK,$D3,$D3
1230         vpaddq          $H3,$D4,$D4             # h3 -> h4
1231
1232         ################################################################
1233         # horizontal addition
1234
1235         vpsrldq         \$8,$D2,$T2
1236         vpsrldq         \$8,$D0,$T0
1237         vpsrldq         \$8,$D1,$T1
1238         vpsrldq         \$8,$D3,$T3
1239         vpsrldq         \$8,$D4,$T4
1240         vpaddq          $T2,$D2,$H2
1241         vpaddq          $T0,$D0,$H0
1242         vpaddq          $T1,$D1,$H1
1243         vpaddq          $T3,$D3,$H3
1244         vpaddq          $T4,$D4,$H4
1245
1246         vmovd           $H0,`4*0-48-64`($ctx)   # save partially reduced
1247         vmovd           $H1,`4*1-48-64`($ctx)
1248         vmovd           $H2,`4*2-48-64`($ctx)
1249         vmovd           $H3,`4*3-48-64`($ctx)
1250         vmovd           $H4,`4*4-48-64`($ctx)
1251 ___
1252 $code.=<<___    if ($win64);
1253         vmovdqa         0x50(%r11),%xmm6
1254         vmovdqa         0x60(%r11),%xmm7
1255         vmovdqa         0x70(%r11),%xmm8
1256         vmovdqa         0x80(%r11),%xmm9
1257         vmovdqa         0x90(%r11),%xmm10
1258         vmovdqa         0xa0(%r11),%xmm11
1259         vmovdqa         0xb0(%r11),%xmm12
1260         vmovdqa         0xc0(%r11),%xmm13
1261         vmovdqa         0xd0(%r11),%xmm14
1262         vmovdqa         0xe0(%r11),%xmm15
1263         lea             0xf8(%r11),%rsp
1264 .Ldo_avx_epilogue:
1265 ___
1266 $code.=<<___    if (!$win64);
1267         lea             0x58(%r11),%rsp
1268 ___
1269 $code.=<<___;
1270         vzeroupper
1271         ret
1272 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1273
1274 .type   poly1305_emit_avx,\@function,3
1275 .align  32
1276 poly1305_emit_avx:
1277         cmpl    \$0,20($ctx)    # is_base2_26?
1278         je      poly1305_emit
1279
1280         mov     0($ctx),%eax    # load hash value base 2^26
1281         mov     4($ctx),%ecx
1282         mov     8($ctx),%r8d
1283         mov     12($ctx),%r11d
1284         mov     16($ctx),%r10d
1285
1286         shl     \$26,%rcx       # base 2^26 -> base 2^64
1287         mov     %r8,%r9
1288         shl     \$52,%r8
1289         add     %rcx,%rax
1290         shr     \$12,%r9
1291         add     %rax,%r8        # h0
1292         adc     \$0,%r9
1293
1294         shl     \$14,%r11
1295         mov     %r10,%rax
1296         shr     \$24,%r10
1297         add     %r11,%r9
1298         shl     \$40,%rax
1299         add     %rax,%r9        # h1
1300         adc     \$0,%r10        # h2
1301
1302         mov     %r10,%rax       # could be partially reduced, so reduce
1303         mov     %r10,%rcx
1304         and     \$3,%r10
1305         shr     \$2,%rax
1306         and     \$-4,%rcx
1307         add     %rcx,%rax
1308         add     %rax,%r8
1309         adc     \$0,%r9
1310
1311         mov     %r8,%rax
1312         add     \$5,%r8         # compare to modulus
1313         mov     %r9,%rcx
1314         adc     \$0,%r9
1315         adc     \$0,%r10
1316         shr     \$2,%r10        # did 130-bit value overflow?
1317         cmovnz  %r8,%rax
1318         cmovnz  %r9,%rcx
1319
1320         add     0($nonce),%rax  # accumulate nonce
1321         adc     8($nonce),%rcx
1322         mov     %rax,0($mac)    # write result
1323         mov     %rcx,8($mac)
1324
1325         ret
1326 .size   poly1305_emit_avx,.-poly1305_emit_avx
1327 ___
1328
1329 if ($avx>1) {
1330 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1331     map("%ymm$_",(0..15));
1332 my $S4=$MASK;
1333
1334 $code.=<<___;
1335 .type   poly1305_blocks_avx2,\@function,4
1336 .align  32
1337 poly1305_blocks_avx2:
1338         mov     20($ctx),%r8d           # is_base2_26
1339         cmp     \$128,$len
1340         jae     .Lblocks_avx2
1341         test    %r8d,%r8d
1342         jz      poly1305_blocks
1343
1344 .Lblocks_avx2:
1345         and     \$-16,$len
1346         jz      .Lno_data_avx2
1347
1348         vzeroupper
1349
1350         test    %r8d,%r8d
1351         jz      .Lbase2_64_avx2
1352
1353         test    \$63,$len
1354         jz      .Leven_avx2
1355
1356         push    %rbx
1357         push    %rbp
1358         push    %r12
1359         push    %r13
1360         push    %r14
1361         push    %r15
1362 .Lblocks_avx2_body:
1363
1364         mov     $len,%r15               # reassign $len
1365
1366         mov     0($ctx),$d1             # load hash value
1367         mov     8($ctx),$d2
1368         mov     16($ctx),$h2#d
1369
1370         mov     24($ctx),$r0            # load r
1371         mov     32($ctx),$s1
1372
1373         ################################# base 2^26 -> base 2^64
1374         mov     $d1#d,$h0#d
1375         and     \$-1<<31,$d1
1376         mov     $d2,$r1                 # borrow $r1
1377         mov     $d2#d,$h1#d
1378         and     \$-1<<31,$d2
1379
1380         shr     \$6,$d1
1381         shl     \$52,$r1
1382         add     $d1,$h0
1383         shr     \$12,$h1
1384         shr     \$18,$d2
1385         add     $r1,$h0
1386         adc     $d2,$h1
1387
1388         mov     $h2,$d1
1389         shl     \$40,$d1
1390         shr     \$24,$h2
1391         add     $d1,$h1
1392         adc     \$0,$h2                 # can be partially reduced...
1393
1394         mov     \$-4,$d2                # ... so reduce
1395         mov     $h2,$d1
1396         and     $h2,$d2
1397         shr     \$2,$d1
1398         and     \$3,$h2
1399         add     $d2,$d1                 # =*5
1400         add     $d1,$h0
1401         adc     \$0,$h1
1402
1403         mov     $s1,$r1
1404         mov     $s1,%rax
1405         shr     \$2,$s1
1406         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1407
1408 .Lbase2_26_pre_avx2:
1409         add     0($inp),$h0             # accumulate input
1410         adc     8($inp),$h1
1411         lea     16($inp),$inp
1412         adc     $padbit,$h2
1413         sub     \$16,%r15
1414
1415         call    __poly1305_block
1416         mov     $r1,%rax
1417
1418         test    \$63,%r15
1419         jnz     .Lbase2_26_pre_avx2
1420
1421         test    $padbit,$padbit         # if $padbit is zero,
1422         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1423
1424         ################################# base 2^64 -> base 2^26
1425         mov     $h0,%rax
1426         mov     $h0,%rdx
1427         shr     \$52,$h0
1428         mov     $h1,$r0
1429         mov     $h1,$r1
1430         shr     \$26,%rdx
1431         and     \$0x3ffffff,%rax        # h[0]
1432         shl     \$12,$r0
1433         and     \$0x3ffffff,%rdx        # h[1]
1434         shr     \$14,$h1
1435         or      $r0,$h0
1436         shl     \$24,$h2
1437         and     \$0x3ffffff,$h0         # h[2]
1438         shr     \$40,$r1
1439         and     \$0x3ffffff,$h1         # h[3]
1440         or      $r1,$h2                 # h[4]
1441
1442         test    %r15,%r15
1443         jz      .Lstore_base2_26_avx2
1444
1445         vmovd   %rax#d,%x#$H0
1446         vmovd   %rdx#d,%x#$H1
1447         vmovd   $h0#d,%x#$H2
1448         vmovd   $h1#d,%x#$H3
1449         vmovd   $h2#d,%x#$H4
1450         jmp     .Lproceed_avx2
1451
1452 .align  32
1453 .Lstore_base2_64_avx2:
1454         mov     $h0,0($ctx)
1455         mov     $h1,8($ctx)
1456         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1457         jmp     .Ldone_avx2
1458
1459 .align  16
1460 .Lstore_base2_26_avx2:
1461         mov     %rax#d,0($ctx)          # store hash value base 2^26
1462         mov     %rdx#d,4($ctx)
1463         mov     $h0#d,8($ctx)
1464         mov     $h1#d,12($ctx)
1465         mov     $h2#d,16($ctx)
1466 .align  16
1467 .Ldone_avx2:
1468         mov     0(%rsp),%r15
1469         mov     8(%rsp),%r14
1470         mov     16(%rsp),%r13
1471         mov     24(%rsp),%r12
1472         mov     32(%rsp),%rbp
1473         mov     40(%rsp),%rbx
1474         lea     48(%rsp),%rsp
1475 .Lno_data_avx2:
1476 .Lblocks_avx2_epilogue:
1477         ret
1478
1479 .align  32
1480 .Lbase2_64_avx2:
1481         push    %rbx
1482         push    %rbp
1483         push    %r12
1484         push    %r13
1485         push    %r14
1486         push    %r15
1487 .Lbase2_64_avx2_body:
1488
1489         mov     $len,%r15               # reassign $len
1490
1491         mov     24($ctx),$r0            # load r
1492         mov     32($ctx),$s1
1493
1494         mov     0($ctx),$h0             # load hash value
1495         mov     8($ctx),$h1
1496         mov     16($ctx),$h2#d
1497
1498         mov     $s1,$r1
1499         mov     $s1,%rax
1500         shr     \$2,$s1
1501         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1502
1503         test    \$63,$len
1504         jz      .Linit_avx2
1505
1506 .Lbase2_64_pre_avx2:
1507         add     0($inp),$h0             # accumulate input
1508         adc     8($inp),$h1
1509         lea     16($inp),$inp
1510         adc     $padbit,$h2
1511         sub     \$16,%r15
1512
1513         call    __poly1305_block
1514         mov     $r1,%rax
1515
1516         test    \$63,%r15
1517         jnz     .Lbase2_64_pre_avx2
1518
1519 .Linit_avx2:
1520         ################################# base 2^64 -> base 2^26
1521         mov     $h0,%rax
1522         mov     $h0,%rdx
1523         shr     \$52,$h0
1524         mov     $h1,$d1
1525         mov     $h1,$d2
1526         shr     \$26,%rdx
1527         and     \$0x3ffffff,%rax        # h[0]
1528         shl     \$12,$d1
1529         and     \$0x3ffffff,%rdx        # h[1]
1530         shr     \$14,$h1
1531         or      $d1,$h0
1532         shl     \$24,$h2
1533         and     \$0x3ffffff,$h0         # h[2]
1534         shr     \$40,$d2
1535         and     \$0x3ffffff,$h1         # h[3]
1536         or      $d2,$h2                 # h[4]
1537
1538         vmovd   %rax#d,%x#$H0
1539         vmovd   %rdx#d,%x#$H1
1540         vmovd   $h0#d,%x#$H2
1541         vmovd   $h1#d,%x#$H3
1542         vmovd   $h2#d,%x#$H4
1543         movl    \$1,20($ctx)            # set is_base2_26
1544
1545         call    __poly1305_init_avx
1546
1547 .Lproceed_avx2:
1548         mov     %r15,$len
1549
1550         mov     0(%rsp),%r15
1551         mov     8(%rsp),%r14
1552         mov     16(%rsp),%r13
1553         mov     24(%rsp),%r12
1554         mov     32(%rsp),%rbp
1555         mov     40(%rsp),%rbx
1556         lea     48(%rsp),%rax
1557         lea     48(%rsp),%rsp
1558 .Lbase2_64_avx2_epilogue:
1559         jmp     .Ldo_avx2
1560
1561 .align  32
1562 .Leven_avx2:
1563         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1564         vmovd           4*1($ctx),%x#$H1
1565         vmovd           4*2($ctx),%x#$H2
1566         vmovd           4*3($ctx),%x#$H3
1567         vmovd           4*4($ctx),%x#$H4
1568
1569 .Ldo_avx2:
1570 ___
1571 $code.=<<___    if (!$win64);
1572         lea             -8(%rsp),%r11
1573         sub             \$0x128,%rsp
1574 ___
1575 $code.=<<___    if ($win64);
1576         lea             -0xf8(%rsp),%r11
1577         sub             \$0x1c8,%rsp
1578         vmovdqa         %xmm6,0x50(%r11)
1579         vmovdqa         %xmm7,0x60(%r11)
1580         vmovdqa         %xmm8,0x70(%r11)
1581         vmovdqa         %xmm9,0x80(%r11)
1582         vmovdqa         %xmm10,0x90(%r11)
1583         vmovdqa         %xmm11,0xa0(%r11)
1584         vmovdqa         %xmm12,0xb0(%r11)
1585         vmovdqa         %xmm13,0xc0(%r11)
1586         vmovdqa         %xmm14,0xd0(%r11)
1587         vmovdqa         %xmm15,0xe0(%r11)
1588 .Ldo_avx2_body:
1589 ___
1590 $code.=<<___;
1591         lea             48+64($ctx),$ctx        # size optimization
1592         lea             .Lconst(%rip),%rcx
1593
1594         # expand and copy pre-calculated table to stack
1595         vmovdqu         `16*0-64`($ctx),%x#$T2
1596         and             \$-512,%rsp
1597         vmovdqu         `16*1-64`($ctx),%x#$T3
1598         vmovdqu         `16*2-64`($ctx),%x#$T4
1599         vmovdqu         `16*3-64`($ctx),%x#$D0
1600         vmovdqu         `16*4-64`($ctx),%x#$D1
1601         vmovdqu         `16*5-64`($ctx),%x#$D2
1602         vmovdqu         `16*6-64`($ctx),%x#$D3
1603         vpermq          \$0x15,$T2,$T2          # 00003412 -> 12343434
1604         vmovdqu         `16*7-64`($ctx),%x#$D4
1605         vpermq          \$0x15,$T3,$T3
1606         vpshufd         \$0xc8,$T2,$T2          # 12343434 -> 14243444
1607         vmovdqu         `16*8-64`($ctx),%x#$MASK
1608         vpermq          \$0x15,$T4,$T4
1609         vpshufd         \$0xc8,$T3,$T3
1610         vmovdqa         $T2,0x00(%rsp)
1611         vpermq          \$0x15,$D0,$D0
1612         vpshufd         \$0xc8,$T4,$T4
1613         vmovdqa         $T3,0x20(%rsp)
1614         vpermq          \$0x15,$D1,$D1
1615         vpshufd         \$0xc8,$D0,$D0
1616         vmovdqa         $T4,0x40(%rsp)
1617         vpermq          \$0x15,$D2,$D2
1618         vpshufd         \$0xc8,$D1,$D1
1619         vmovdqa         $D0,0x60(%rsp)
1620         vpermq          \$0x15,$D3,$D3
1621         vpshufd         \$0xc8,$D2,$D2
1622         vmovdqa         $D1,0x80(%rsp)
1623         vpermq          \$0x15,$D4,$D4
1624         vpshufd         \$0xc8,$D3,$D3
1625         vmovdqa         $D2,0xa0(%rsp)
1626         vpermq          \$0x15,$MASK,$MASK
1627         vpshufd         \$0xc8,$D4,$D4
1628         vmovdqa         $D3,0xc0(%rsp)
1629         vpshufd         \$0xc8,$MASK,$MASK
1630         vmovdqa         $D4,0xe0(%rsp)
1631         vmovdqa         $MASK,0x100(%rsp)
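	# Stacked table layout (for reference): nine 256-bit vectors at
	# rsp+0x00..0x100, 32 bytes apart, in the order
	#	r0, r1, s1, r2, s2, r3, s3, r4, s4	(si = 5*ri),
	# each interleaving the digits of r^1..r^4 (the 14243444 pattern above)
	# so that an aligned vpmuludq operand presents the r^4 digit to all
	# four lanes.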
1632         vmovdqa         64(%rcx),$MASK          # .Lmask26
1633
1634         ################################################################
1635         # load input
1636         vmovdqu         16*0($inp),%x#$T0
1637         vmovdqu         16*1($inp),%x#$T1
1638         vinserti128     \$1,16*2($inp),$T0,$T0
1639         vinserti128     \$1,16*3($inp),$T1,$T1
1640         lea             16*4($inp),$inp
1641
1642         vpsrldq         \$6,$T0,$T2             # splat input
1643         vpsrldq         \$6,$T1,$T3
1644         vpunpckhqdq     $T1,$T0,$T4             # 4
1645         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1646         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1647
1648         vpsrlq          \$30,$T2,$T3
1649         vpsrlq          \$4,$T2,$T2
1650         vpsrlq          \$26,$T0,$T1
1651         vpsrlq          \$40,$T4,$T4            # 4
1652         vpand           $MASK,$T2,$T2           # 2
1653         vpand           $MASK,$T0,$T0           # 0
1654         vpand           $MASK,$T1,$T1           # 1
1655         vpand           $MASK,$T3,$T3           # 3
1656         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
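	# For reference (not generated code): each 16-byte block m is split into
	# 26-bit digits m mod 2^26, (m>>26) mod 2^26, (m>>52) mod 2^26,
	# (m>>78) mod 2^26 and m>>104, one block per 64-bit lane of T0..T4; the
	# always-set pad bit 2^128 becomes bit 24 of the top digit, which is
	# what the .L129 constant at 32(%rcx) supplies.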
1657
1658         lea             0x90(%rsp),%rax         # size optimization
1659         vpaddq          $H2,$T2,$H2             # accumulate input
1660         sub             \$64,$len
1661         jz              .Ltail_avx2
1662         jmp             .Loop_avx2
1663
1664 .align  32
1665 .Loop_avx2:
1666         ################################################################
1667         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1668         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1669         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1670         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1671         #   \________/\________/
1672         ################################################################
1673         #vpaddq         $H2,$T2,$H2             # accumulate input
1674         vpaddq          $H0,$T0,$H0
1675         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1676         vpaddq          $H1,$T1,$H1
1677         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1678         vpaddq          $H3,$T3,$H3
1679         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1680         vpaddq          $H4,$T4,$H4
1681         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1682         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1683
1684         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1685         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1686         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1687         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1688         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1689         #
1690         # however, as h2 is "chronologically" the first one available, pull
1691         # the corresponding operations up, so it's
1692         #
1693         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1694         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1695         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1696         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1697         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
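	#
	# (The 5* factors come from 2^130 = 5 (mod 2^130-5): digit products
	# whose weight reaches 2^130 wrap around times 5, which is why the
	# stacked table also carries s1..s4 = 5*r1..5*r4.)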
1698
1699         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1700         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1701         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1702         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1703         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1704
1705         vpmuludq        $H0,$T1,$T4             # h0*r1
1706         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1707         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1708         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1709         vpmuludq        $H3,$T1,$T4             # h3*r1
1710         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1711         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1712         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1713          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1714
1715         vpmuludq        $H0,$T0,$T4             # h0*r0
1716         vpmuludq        $H1,$T0,$H2             # h1*r0
1717         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1718         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1719         vpmuludq        $H3,$T0,$T4             # h3*r0
1720         vpmuludq        $H4,$T0,$H2             # h4*r0
1721          vmovdqu        16*0($inp),%x#$T0       # load input
1722         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1723         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1724          vinserti128    \$1,16*2($inp),$T0,$T0
1725
1726         vpmuludq        $H3,$T1,$T4             # h3*s2
1727         vpmuludq        $H4,$T1,$H2             # h4*s2
1728          vmovdqu        16*1($inp),%x#$T1
1729         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1730         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1731          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1732         vpmuludq        $H1,$T2,$T4             # h1*r2
1733         vpmuludq        $H0,$T2,$T2             # h0*r2
1734         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1735         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1736          vinserti128    \$1,16*3($inp),$T1,$T1
1737          lea            16*4($inp),$inp
1738
1739         vpmuludq        $H1,$H2,$T4             # h1*r3
1740         vpmuludq        $H0,$H2,$H2             # h0*r3
1741          vpsrldq        \$6,$T0,$T2             # splat input
1742         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1743         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1744         vpmuludq        $H3,$T3,$T4             # h3*s3
1745         vpmuludq        $H4,$T3,$H2             # h4*s3
1746          vpsrldq        \$6,$T1,$T3
1747         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1748         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1749          vpunpckhqdq    $T1,$T0,$T4             # 4
1750
1751         vpmuludq        $H3,$S4,$H3             # h3*s4
1752         vpmuludq        $H4,$S4,$H4             # h4*s4
1753          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1754         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1755         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1756          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1757         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1758         vpmuludq        $H1,$S4,$H0             # h1*s4
1759         vmovdqa         64(%rcx),$MASK          # .Lmask26
1760         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1761         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1762
1763         ################################################################
1764         # lazy reduction (interleaved with tail of input splat)
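	# For reference (not generated code), the per-lane carry chain is
	#	h3 -> h4,  h0 -> h1,  h4 -> h0 (times 5),  h1 -> h2,
	#	h2 -> h3,  h0 -> h1,  h3 -> h4
	# where each step is c = hx>>26, hx &= 0x3ffffff, hy += c.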
1765
1766         vpsrlq          \$26,$H3,$D3
1767         vpand           $MASK,$H3,$H3
1768         vpaddq          $D3,$H4,$H4             # h3 -> h4
1769
1770         vpsrlq          \$26,$H0,$D0
1771         vpand           $MASK,$H0,$H0
1772         vpaddq          $D0,$D1,$H1             # h0 -> h1
1773
1774         vpsrlq          \$26,$H4,$D4
1775         vpand           $MASK,$H4,$H4
1776
1777          vpsrlq         \$4,$T3,$T2
1778
1779         vpsrlq          \$26,$H1,$D1
1780         vpand           $MASK,$H1,$H1
1781         vpaddq          $D1,$H2,$H2             # h1 -> h2
1782
1783         vpaddq          $D4,$H0,$H0
1784         vpsllq          \$2,$D4,$D4
1785         vpaddq          $D4,$H0,$H0             # h4 -> h0
1786
1787          vpand          $MASK,$T2,$T2           # 2
1788          vpsrlq         \$26,$T0,$T1
1789
1790         vpsrlq          \$26,$H2,$D2
1791         vpand           $MASK,$H2,$H2
1792         vpaddq          $D2,$H3,$H3             # h2 -> h3
1793
1794          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1795          vpsrlq         \$30,$T3,$T3
1796
1797         vpsrlq          \$26,$H0,$D0
1798         vpand           $MASK,$H0,$H0
1799         vpaddq          $D0,$H1,$H1             # h0 -> h1
1800
1801          vpsrlq         \$40,$T4,$T4            # 4
1802
1803         vpsrlq          \$26,$H3,$D3
1804         vpand           $MASK,$H3,$H3
1805         vpaddq          $D3,$H4,$H4             # h3 -> h4
1806
1807          vpand          $MASK,$T0,$T0           # 0
1808          vpand          $MASK,$T1,$T1           # 1
1809          vpand          $MASK,$T3,$T3           # 3
1810          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1811
1812         sub             \$64,$len
1813         jnz             .Loop_avx2
1814
1815         .byte           0x66,0x90               # 2-byte nop (xchg ax,ax)
1816 .Ltail_avx2:
1817         ################################################################
1818         # while the multiplications above were by r^4 in all lanes, in the
1819         # last iteration the least significant lane is multiplied by r^4 and
1820         # the most significant one by r, so this is a copy of the above,
1821         # except that references to the precomputed table are displaced by 4 bytes...
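	#
	# (The 4-byte displacement shifts the interleaved 32-bit digits so that
	# vpmuludq sees r^4 in the least significant lane down to r^1 in the
	# most significant one, instead of r^4 everywhere.)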
1822
1823         #vpaddq         $H2,$T2,$H2             # accumulate input
1824         vpaddq          $H0,$T0,$H0
1825         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1826         vpaddq          $H1,$T1,$H1
1827         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1828         vpaddq          $H3,$T3,$H3
1829         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1830         vpaddq          $H4,$T4,$H4
1831         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1832         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1833
1834         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1835         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1836         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1837         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1838         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1839
1840         vpmuludq        $H0,$T1,$T4             # h0*r1
1841         vpmuludq        $H1,$T1,$H2             # h1*r1
1842         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1843         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1844         vpmuludq        $H3,$T1,$T4             # h3*r1
1845         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1846         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1847         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1848
1849         vpmuludq        $H0,$T0,$T4             # h0*r0
1850         vpmuludq        $H1,$T0,$H2             # h1*r0
1851         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1852          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1853         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1854         vpmuludq        $H3,$T0,$T4             # h3*r0
1855         vpmuludq        $H4,$T0,$H2             # h4*r0
1856         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1857         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1858
1859         vpmuludq        $H3,$T1,$T4             # h3*s2
1860         vpmuludq        $H4,$T1,$H2             # h4*s2
1861         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1862         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1863          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1864         vpmuludq        $H1,$T2,$T4             # h1*r2
1865         vpmuludq        $H0,$T2,$T2             # h0*r2
1866         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1867         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1868
1869         vpmuludq        $H1,$H2,$T4             # h1*r3
1870         vpmuludq        $H0,$H2,$H2             # h0*r3
1871         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1872         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1873         vpmuludq        $H3,$T3,$T4             # h3*s3
1874         vpmuludq        $H4,$T3,$H2             # h4*s3
1875         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1876         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1877
1878         vpmuludq        $H3,$S4,$H3             # h3*s4
1879         vpmuludq        $H4,$S4,$H4             # h4*s4
1880         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1881         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1882         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
1883         vpmuludq        $H1,$S4,$H0             # h1*s4
1884         vmovdqa         64(%rcx),$MASK          # .Lmask26
1885         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1886         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1887
1888         ################################################################
1889         # lazy reduction
1890
1891         vpsrlq          \$26,$H3,$D3
1892         vpand           $MASK,$H3,$H3
1893         vpaddq          $D3,$H4,$H4             # h3 -> h4
1894
1895         vpsrlq          \$26,$H0,$D0
1896         vpand           $MASK,$H0,$H0
1897         vpaddq          $D0,$D1,$H1             # h0 -> h1
1898
1899         vpsrlq          \$26,$H4,$D4
1900         vpand           $MASK,$H4,$H4
1901
1902         vpsrlq          \$26,$H1,$D1
1903         vpand           $MASK,$H1,$H1
1904         vpaddq          $D1,$H2,$H2             # h1 -> h2
1905
1906         vpaddq          $D4,$H0,$H0
1907         vpsllq          \$2,$D4,$D4
1908         vpaddq          $D4,$H0,$H0             # h4 -> h0
1909
1910         vpsrlq          \$26,$H2,$D2
1911         vpand           $MASK,$H2,$H2
1912         vpaddq          $D2,$H3,$H3             # h2 -> h3
1913
1914         vpsrlq          \$26,$H0,$D0
1915         vpand           $MASK,$H0,$H0
1916         vpaddq          $D0,$H1,$H1             # h0 -> h1
1917
1918         vpsrlq          \$26,$H3,$D3
1919         vpand           $MASK,$H3,$H3
1920         vpaddq          $D3,$H4,$H4             # h3 -> h4
1921
1922         ################################################################
1923         # horizontal addition
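	# For reference: each Hn still holds four per-lane sums; vpsrldq adds
	# the high qword of each 128-bit half onto the low one, and vpermq then
	# folds the upper 128-bit half down, leaving the full sum in the bottom
	# 64-bit lane, which is what the vmovd stores below pick up.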
1924
1925         vpsrldq         \$8,$H2,$T2
1926         vpsrldq         \$8,$H0,$T0
1927         vpsrldq         \$8,$H1,$T1
1928         vpsrldq         \$8,$H3,$T3
1929         vpsrldq         \$8,$H4,$T4
1930         vpaddq          $T2,$H2,$H2
1931         vpaddq          $T0,$H0,$H0
1932         vpaddq          $T1,$H1,$H1
1933         vpaddq          $T3,$H3,$H3
1934         vpaddq          $T4,$H4,$H4
1935
1936         vpermq          \$0x2,$H2,$T2
1937         vpermq          \$0x2,$H0,$T0
1938         vpermq          \$0x2,$H1,$T1
1939         vpermq          \$0x2,$H3,$T3
1940         vpermq          \$0x2,$H4,$T4
1941         vpaddq          $T2,$H2,$H2
1942         vpaddq          $T0,$H0,$H0
1943         vpaddq          $T1,$H1,$H1
1944         vpaddq          $T3,$H3,$H3
1945         vpaddq          $T4,$H4,$H4
1946
1947         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
1948         vmovd           %x#$H1,`4*1-48-64`($ctx)
1949         vmovd           %x#$H2,`4*2-48-64`($ctx)
1950         vmovd           %x#$H3,`4*3-48-64`($ctx)
1951         vmovd           %x#$H4,`4*4-48-64`($ctx)
1952 ___
1953 $code.=<<___    if ($win64);
1954         vmovdqa         0x50(%r11),%xmm6
1955         vmovdqa         0x60(%r11),%xmm7
1956         vmovdqa         0x70(%r11),%xmm8
1957         vmovdqa         0x80(%r11),%xmm9
1958         vmovdqa         0x90(%r11),%xmm10
1959         vmovdqa         0xa0(%r11),%xmm11
1960         vmovdqa         0xb0(%r11),%xmm12
1961         vmovdqa         0xc0(%r11),%xmm13
1962         vmovdqa         0xd0(%r11),%xmm14
1963         vmovdqa         0xe0(%r11),%xmm15
1964         lea             0xf8(%r11),%rsp
1965 .Ldo_avx2_epilogue:
1966 ___
1967 $code.=<<___    if (!$win64);
1968         lea             8(%r11),%rsp
1969 ___
1970 $code.=<<___;
1971         vzeroupper
1972         ret
1973 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
1974 ___
1975 }
1976 $code.=<<___;
1977 .align  64
1978 .Lconst:
1979 .Lmask24:
1980 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
1981 .L129:
1982 .long   1<<24,0,1<<24,0,1<<24,0,1<<24,0
1983 .Lmask26:
1984 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
1985 .Lfive:
1986 .long   5,0,5,0,5,0,5,0
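# For reference: these constants live at .Lconst+0, +32, +64 and +96;
# .L129 is the pad bit 2^128 expressed in base 2^26 (1<<24 in the top
# digit, cf. "padbit, yes, always" above), and .Lmask26 is the 26-bit
# digit mask used throughout the vector code.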
1987 ___
1988 }
1989
1990 $code.=<<___;
1991 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1992 .align  16
1993 ___
1994
1995 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1996 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
1997 if ($win64) {
1998 $rec="%rcx";
1999 $frame="%rdx";
2000 $context="%r8";
2001 $disp="%r9";
2002
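# What follows (for reference): se_handler undoes the integer-register
# prologue of the scalar/setup paths, avx_handler additionally copies the
# ten saved xmm registers back into the CONTEXT record, and both fall into
# .Lcommon_seh_tail, which hands off to RtlVirtualUnwind to continue the
# unwind from the adjusted context.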
2003 $code.=<<___;
2004 .extern __imp_RtlVirtualUnwind
2005 .type   se_handler,\@abi-omnipotent
2006 .align  16
2007 se_handler:
2008         push    %rsi
2009         push    %rdi
2010         push    %rbx
2011         push    %rbp
2012         push    %r12
2013         push    %r13
2014         push    %r14
2015         push    %r15
2016         pushfq
2017         sub     \$64,%rsp
2018
2019         mov     120($context),%rax      # pull context->Rax
2020         mov     248($context),%rbx      # pull context->Rip
2021
2022         mov     8($disp),%rsi           # disp->ImageBase
2023         mov     56($disp),%r11          # disp->HandlerData
2024
2025         mov     0(%r11),%r10d           # HandlerData[0]
2026         lea     (%rsi,%r10),%r10        # prologue label
2027         cmp     %r10,%rbx               # context->Rip<.Lprologue
2028         jb      .Lcommon_seh_tail
2029
2030         mov     152($context),%rax      # pull context->Rsp
2031
2032         mov     4(%r11),%r10d           # HandlerData[1]
2033         lea     (%rsi,%r10),%r10        # epilogue label
2034         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
2035         jae     .Lcommon_seh_tail
2036
2037         lea     48(%rax),%rax
2038
2039         mov     -8(%rax),%rbx
2040         mov     -16(%rax),%rbp
2041         mov     -24(%rax),%r12
2042         mov     -32(%rax),%r13
2043         mov     -40(%rax),%r14
2044         mov     -48(%rax),%r15
2045         mov     %rbx,144($context)      # restore context->Rbx
2046         mov     %rbp,160($context)      # restore context->Rbp
2047         mov     %r12,216($context)      # restore context->R12
2048         mov     %r13,224($context)      # restore context->R13
2049         mov     %r14,232($context)      # restore context->R14
2050         mov     %r15,240($context)      # restore context->R15
2051
2052         jmp     .Lcommon_seh_tail
2053 .size   se_handler,.-se_handler
2054
2055 .type   avx_handler,\@abi-omnipotent
2056 .align  16
2057 avx_handler:
2058         push    %rsi
2059         push    %rdi
2060         push    %rbx
2061         push    %rbp
2062         push    %r12
2063         push    %r13
2064         push    %r14
2065         push    %r15
2066         pushfq
2067         sub     \$64,%rsp
2068
2069         mov     120($context),%rax      # pull context->Rax
2070         mov     248($context),%rbx      # pull context->Rip
2071
2072         mov     8($disp),%rsi           # disp->ImageBase
2073         mov     56($disp),%r11          # disp->HandlerData
2074
2075         mov     0(%r11),%r10d           # HandlerData[0]
2076         lea     (%rsi,%r10),%r10        # prologue label
2077         cmp     %r10,%rbx               # context->Rip<prologue label
2078         jb      .Lcommon_seh_tail
2079
2080         mov     152($context),%rax      # pull context->Rsp
2081
2082         mov     4(%r11),%r10d           # HandlerData[1]
2083         lea     (%rsi,%r10),%r10        # epilogue label
2084         cmp     %r10,%rbx               # context->Rip>=epilogue label
2085         jae     .Lcommon_seh_tail
2086
2087         mov     208($context),%rax      # pull context->R11
2088
2089         lea     0x50(%rax),%rsi
2090         lea     0xf8(%rax),%rax
2091         lea     512($context),%rdi      # &context.Xmm6
2092         mov     \$20,%ecx               # 10 xmm registers, 20 quadwords
2093         .long   0xa548f3fc              # cld; rep movsq
2094
2095 .Lcommon_seh_tail:
2096         mov     8(%rax),%rdi
2097         mov     16(%rax),%rsi
2098         mov     %rax,152($context)      # restore context->Rsp
2099         mov     %rsi,168($context)      # restore context->Rsi
2100         mov     %rdi,176($context)      # restore context->Rdi
2101
2102         mov     40($disp),%rdi          # disp->ContextRecord
2103         mov     $context,%rsi           # context
2104         mov     \$154,%ecx              # sizeof(CONTEXT)
2105         .long   0xa548f3fc              # cld; rep movsq
2106
2107         mov     $disp,%rsi
2108         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
2109         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
2110         mov     0(%rsi),%r8             # arg3, disp->ControlPc
2111         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
2112         mov     40(%rsi),%r10           # disp->ContextRecord
2113         lea     56(%rsi),%r11           # &disp->HandlerData
2114         lea     24(%rsi),%r12           # &disp->EstablisherFrame
2115         mov     %r10,32(%rsp)           # arg5
2116         mov     %r11,40(%rsp)           # arg6
2117         mov     %r12,48(%rsp)           # arg7
2118         mov     %rcx,56(%rsp)           # arg8, (NULL)
2119         call    *__imp_RtlVirtualUnwind(%rip)
2120
2121         mov     \$1,%eax                # ExceptionContinueSearch
2122         add     \$64,%rsp
2123         popfq
2124         pop     %r15
2125         pop     %r14
2126         pop     %r13
2127         pop     %r12
2128         pop     %rbp
2129         pop     %rbx
2130         pop     %rdi
2131         pop     %rsi
2132         ret
2133 .size   avx_handler,.-avx_handler
2134
2135 .section        .pdata
2136 .align  4
2137         .rva    .LSEH_begin_poly1305_init
2138         .rva    .LSEH_end_poly1305_init
2139         .rva    .LSEH_info_poly1305_init
2140
2141         .rva    .LSEH_begin_poly1305_blocks
2142         .rva    .LSEH_end_poly1305_blocks
2143         .rva    .LSEH_info_poly1305_blocks
2144
2145         .rva    .LSEH_begin_poly1305_emit
2146         .rva    .LSEH_end_poly1305_emit
2147         .rva    .LSEH_info_poly1305_emit
2148 ___
2149 $code.=<<___ if ($avx);
2150         .rva    .LSEH_begin_poly1305_blocks_avx
2151         .rva    .Lbase2_64_avx
2152         .rva    .LSEH_info_poly1305_blocks_avx_1
2153
2154         .rva    .Lbase2_64_avx
2155         .rva    .Leven_avx
2156         .rva    .LSEH_info_poly1305_blocks_avx_2
2157
2158         .rva    .Leven_avx
2159         .rva    .LSEH_end_poly1305_blocks_avx
2160         .rva    .LSEH_info_poly1305_blocks_avx_3
2161
2162         .rva    .LSEH_begin_poly1305_emit_avx
2163         .rva    .LSEH_end_poly1305_emit_avx
2164         .rva    .LSEH_info_poly1305_emit_avx
2165 ___
2166 $code.=<<___ if ($avx>1);
2167         .rva    .LSEH_begin_poly1305_blocks_avx2
2168         .rva    .Lbase2_64_avx2
2169         .rva    .LSEH_info_poly1305_blocks_avx2_1
2170
2171         .rva    .Lbase2_64_avx2
2172         .rva    .Leven_avx2
2173         .rva    .LSEH_info_poly1305_blocks_avx2_2
2174
2175         .rva    .Leven_avx2
2176         .rva    .LSEH_end_poly1305_blocks_avx2
2177         .rva    .LSEH_info_poly1305_blocks_avx2_3
2178 ___
2179 $code.=<<___;
2180 .section        .xdata
2181 .align  8
2182 .LSEH_info_poly1305_init:
2183         .byte   9,0,0,0
2184         .rva    se_handler
2185         .rva    .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
2186
2187 .LSEH_info_poly1305_blocks:
2188         .byte   9,0,0,0
2189         .rva    se_handler
2190         .rva    .Lblocks_body,.Lblocks_epilogue
2191
2192 .LSEH_info_poly1305_emit:
2193         .byte   9,0,0,0
2194         .rva    se_handler
2195         .rva    .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
2196 ___
2197 $code.=<<___ if ($avx);
2198 .LSEH_info_poly1305_blocks_avx_1:
2199         .byte   9,0,0,0
2200         .rva    se_handler
2201         .rva    .Lblocks_avx_body,.Lblocks_avx_epilogue         # HandlerData[]
2202
2203 .LSEH_info_poly1305_blocks_avx_2:
2204         .byte   9,0,0,0
2205         .rva    se_handler
2206         .rva    .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue     # HandlerData[]
2207
2208 .LSEH_info_poly1305_blocks_avx_3:
2209         .byte   9,0,0,0
2210         .rva    avx_handler
2211         .rva    .Ldo_avx_body,.Ldo_avx_epilogue                 # HandlerData[]
2212
2213 .LSEH_info_poly1305_emit_avx:
2214         .byte   9,0,0,0
2215         .rva    se_handler
2216         .rva    .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
2217 ___
2218 $code.=<<___ if ($avx>1);
2219 .LSEH_info_poly1305_blocks_avx2_1:
2220         .byte   9,0,0,0
2221         .rva    se_handler
2222         .rva    .Lblocks_avx2_body,.Lblocks_avx2_epilogue       # HandlerData[]
2223
2224 .LSEH_info_poly1305_blocks_avx2_2:
2225         .byte   9,0,0,0
2226         .rva    se_handler
2227         .rva    .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue   # HandlerData[]
2228
2229 .LSEH_info_poly1305_blocks_avx2_3:
2230         .byte   9,0,0,0
2231         .rva    avx_handler
2232         .rva    .Ldo_avx2_body,.Ldo_avx2_epilogue               # HandlerData[]
2233 ___
2234 }
2235
2236 foreach (split('\n',$code)) {
2237         s/\`([^\`]*)\`/eval($1)/ge;
2238         s/%r([a-z]+)#d/%e$1/g;
2239         s/%r([0-9]+)#d/%r$1d/g;
2240         s/%x#%y/%x/g;
2241
2242         print $_,"\n";
2243 }
2244 close STDOUT;