1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # This module implements Poly1305 hash for x86_64.
11 #
12 # March 2015
13 #
14 # Numbers are cycles per processed byte with poly1305_blocks alone,
15 # measured with rdtsc at fixed clock frequency.
16 #
17 #               IALU/gcc-4.8(*) AVX(**)         AVX2
18 # P4            4.46/+120%      -
19 # Core 2        2.41/+90%       -
20 # Westmere      1.88/+120%      -
21 # Sandy Bridge  1.39/+140%      1.10
22 # Haswell       1.14/+175%      1.11            0.65
23 # Skylake       1.13/+120%      0.96            0.51
24 # Silvermont    2.83/+95%       -
25 # VIA Nano      1.82/+150%      -
26 # Sledgehammer  1.38/+160%      -
27 # Bulldozer     2.30/+130%      0.97
28 #
29 # (*)   improvement coefficients relative to clang are more modest and
30 #       are ~50% on most processors, in both cases we are comparing to
31 #       __int128 code;
32 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
33 #       it was faster than the integer-only code only on older Intel P4 and
34 #       Core processors, by 50-30% (less on newer ones), while being slower
35 #       on contemporary processors, for example almost 2x slower on Atom;
36 #       as the former are naturally disappearing, SSE2 is deemed unnecessary;
37
38 $flavour = shift;
39 $output  = shift;
40 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
41
42 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
43
44 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
45 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
46 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
47 die "can't locate x86_64-xlate.pl";
48
49 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
50                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
51         $avx = ($1>=2.19) + ($1>=2.22);
52 }
53
54 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
55            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
56         $avx = ($1>=2.09) + ($1>=2.10);
57 }
58
59 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
60            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
61         $avx = ($1>=10) + ($1>=12);
62 }
63
64 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
65         $avx = ($2>=3.0) + ($2>3.0);
66 }
67
68 open OUT,"| \"$^X\" $xlate $flavour $output";
69 *STDOUT=*OUT;
70
71 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
72 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
73 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
74 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
75
76 sub poly1305_iteration {
77 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
78 # output:       $h0-$h2 *= $r0-$r1
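# The product is taken modulo 2^130-5. Since the clamped $r1 has its two
# low bits clear, $s1 = $r1 + ($r1>>2) = r1*5/4, so partial products that
# would land at bit 128 and above can be accumulated 128 bits lower after
# multiplying by $s1 instead of $r1; the closing and/shr/add sequence with
# the \$-4 mask similarly folds bits 130 and up back in times 5, leaving
# at most two significant bits in $h2.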
79 $code.=<<___;
80         mulq    $h0                     # h0*r1
81         mov     %rax,$d2
82          mov    $r0,%rax
83         mov     %rdx,$d3
84
85         mulq    $h0                     # h0*r0
86         mov     %rax,$h0                # future $h0
87          mov    $r0,%rax
88         mov     %rdx,$d1
89
90         mulq    $h1                     # h1*r0
91         add     %rax,$d2
92          mov    $s1,%rax
93         adc     %rdx,$d3
94
95         mulq    $h1                     # h1*s1
96          mov    $h2,$h1                 # borrow $h1
97         add     %rax,$h0
98         adc     %rdx,$d1
99
100         imulq   $s1,$h1                 # h2*s1
101         add     $h1,$d2
102          mov    $d1,$h1
103         adc     \$0,$d3
104
105         imulq   $r0,$h2                 # h2*r0
106         add     $d2,$h1
107         mov     \$-4,%rax               # mask value
108         adc     $h2,$d3
109
110         and     $d3,%rax                # last reduction step
111         mov     $d3,$h2
112         shr     \$2,$d3
113         and     \$3,$h2
114         add     $d3,%rax
115         add     %rax,$h0
116         adc     \$0,$h1
117         adc     \$0,$h2
118 ___
119 }
120
121 ########################################################################
122 # The layout of the opaque area is as follows.
123 #
124 #       unsigned __int64 h[3];          # current hash value base 2^64
125 #       unsigned __int64 r[2];          # key value base 2^64
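# In terms of byte offsets from $ctx this places h[0..2] at 0, 8 and 16,
# and r[0..1] at 24 and 32, which is how the code below addresses them.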
126
127 $code.=<<___;
128 .text
129
130 .extern OPENSSL_ia32cap_P
131
132 .globl  poly1305_init
133 .hidden poly1305_init
134 .globl  poly1305_blocks
135 .hidden poly1305_blocks
136 .globl  poly1305_emit
137 .hidden poly1305_emit
138
139 .type   poly1305_init,\@function,3
140 .align  32
141 poly1305_init:
142         xor     %rax,%rax
143         mov     %rax,0($ctx)            # initialize hash value
144         mov     %rax,8($ctx)
145         mov     %rax,16($ctx)
146
147         cmp     \$0,$inp
148         je      .Lno_key
149
150         lea     poly1305_blocks(%rip),%r10
151         lea     poly1305_emit(%rip),%r11
152 ___
153 $code.=<<___    if ($avx);
154         mov     OPENSSL_ia32cap_P+4(%rip),%r9
155         lea     poly1305_blocks_avx(%rip),%rax
156         lea     poly1305_emit_avx(%rip),%rcx
157         bt      \$`60-32`,%r9           # AVX?
158         cmovc   %rax,%r10
159         cmovc   %rcx,%r11
160 ___
161 $code.=<<___    if ($avx>1);
162         lea     poly1305_blocks_avx2(%rip),%rax
163         bt      \$`5+32`,%r9            # AVX2?
164         cmovc   %rax,%r10
165 ___
166 $code.=<<___;
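        # clamp r as the Poly1305 specification requires: clear the top
        # four bits of every 32-bit word of the key and the bottom two
        # bits of its upper three words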
167         mov     \$0x0ffffffc0fffffff,%rax
168         mov     \$0x0ffffffc0ffffffc,%rcx
169         and     0($inp),%rax
170         and     8($inp),%rcx
171         mov     %rax,24($ctx)
172         mov     %rcx,32($ctx)
173 ___
174 $code.=<<___    if ($flavour !~ /elf32/);
175         mov     %r10,0(%rdx)
176         mov     %r11,8(%rdx)
177 ___
178 $code.=<<___    if ($flavour =~ /elf32/);
179         mov     %r10d,0(%rdx)
180         mov     %r11d,4(%rdx)
181 ___
182 $code.=<<___;
183         mov     \$1,%eax
184 .Lno_key:
185         ret
186 .size   poly1305_init,.-poly1305_init
187
188 .type   poly1305_blocks,\@function,4
189 .align  32
190 poly1305_blocks:
191 .Lblocks:
192         shr     \$4,$len
193         jz      .Lno_data               # too short
194
195         push    %rbx
196         push    %rbp
197         push    %r12
198         push    %r13
199         push    %r14
200         push    %r15
201 .Lblocks_body:
202
203         mov     $len,%r15               # reassign $len
204
205         mov     24($ctx),$r0            # load r
206         mov     32($ctx),$s1
207
208         mov     0($ctx),$h0             # load hash value
209         mov     8($ctx),$h1
210         mov     16($ctx),$h2
211
212         mov     $s1,$r1
213         shr     \$2,$s1
214         mov     $r1,%rax
215         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
216         jmp     .Loop
217
218 .align  32
219 .Loop:
220         add     0($inp),$h0             # accumulate input
221         adc     8($inp),$h1
222         lea     16($inp),$inp
223         adc     $padbit,$h2
224 ___
225         &poly1305_iteration();
226 $code.=<<___;
227         mov     $r1,%rax
228         dec     %r15                    # len-=16
229         jnz     .Loop
230
231         mov     $h0,0($ctx)             # store hash value
232         mov     $h1,8($ctx)
233         mov     $h2,16($ctx)
234
235         mov     0(%rsp),%r15
236         mov     8(%rsp),%r14
237         mov     16(%rsp),%r13
238         mov     24(%rsp),%r12
239         mov     32(%rsp),%rbp
240         mov     40(%rsp),%rbx
241         lea     48(%rsp),%rsp
242 .Lno_data:
243 .Lblocks_epilogue:
244         ret
245 .size   poly1305_blocks,.-poly1305_blocks
246
247 .type   poly1305_emit,\@function,3
248 .align  32
249 poly1305_emit:
250 .Lemit:
251         mov     0($ctx),%r8     # load hash value
252         mov     8($ctx),%r9
253         mov     16($ctx),%r10
254
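        # final reduction: since p = 2^130-5, h >= p exactly when h+5
        # reaches 2^130, i.e. when bit 130 of h+5 is set; compute h+5
        # alongside h and select it branchlessly with cmov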
255         mov     %r8,%rax
256         add     \$5,%r8         # compare to modulus
257         mov     %r9,%rcx
258         adc     \$0,%r9
259         adc     \$0,%r10
260         shr     \$2,%r10        # did 130-bit value overflow?
261         cmovnz  %r8,%rax
262         cmovnz  %r9,%rcx
263
264         add     0($nonce),%rax  # accumulate nonce
265         adc     8($nonce),%rcx
266         mov     %rax,0($mac)    # write result
267         mov     %rcx,8($mac)
268
269         ret
270 .size   poly1305_emit,.-poly1305_emit
271 ___
272 if ($avx) {
273
274 ########################################################################
275 # The layout of the opaque area is as follows.
276 #
277 #       unsigned __int32 h[5];          # current hash value base 2^26
278 #       unsigned __int32 is_base2_26;
279 #       unsigned __int64 r[2];          # key value base 2^64
280 #       unsigned __int64 pad;
281 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
282 #
283 # where r^n are the base 2^26 digits of the powers of the multiplier key.
284 # There are 5 digits, but the last four are interleaved with their multiples
285 # of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
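# In __poly1305_init_avx below element i of the table is filled as four
# 32-bit lanes at byte offsets +0, +4, +8 and +12 carrying the i-th digit
# of r^2, r^1, r^4 and r^3 respectively, so one 128-bit load fetches the
# same digit of all four powers at once.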
286
287 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
288     map("%xmm$_",(0..15));
289
290 $code.=<<___;
291 .type   __poly1305_block,\@abi-omnipotent
292 .align  32
293 __poly1305_block:
294 ___
295         &poly1305_iteration();
296 $code.=<<___;
297         ret
298 .size   __poly1305_block,.-__poly1305_block
299
300 .type   __poly1305_init_avx,\@abi-omnipotent
301 .align  32
302 __poly1305_init_avx:
303         mov     $r0,$h0
304         mov     $r1,$h1
305         xor     $h2,$h2
306
307         lea     48+64($ctx),$ctx        # size optimization
308
309         mov     $r1,%rax
310         call    __poly1305_block        # r^2
311
312         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
313         mov     \$0x3ffffff,%edx
314         mov     $h0,$d1
315         and     $h0#d,%eax
316         mov     $r0,$d2
317         and     $r0#d,%edx
318         mov     %eax,`16*0+0-64`($ctx)
319         shr     \$26,$d1
320         mov     %edx,`16*0+4-64`($ctx)
321         shr     \$26,$d2
322
323         mov     \$0x3ffffff,%eax
324         mov     \$0x3ffffff,%edx
325         and     $d1#d,%eax
326         and     $d2#d,%edx
327         mov     %eax,`16*1+0-64`($ctx)
328         lea     (%rax,%rax,4),%eax      # *5
329         mov     %edx,`16*1+4-64`($ctx)
330         lea     (%rdx,%rdx,4),%edx      # *5
331         mov     %eax,`16*2+0-64`($ctx)
332         shr     \$26,$d1
333         mov     %edx,`16*2+4-64`($ctx)
334         shr     \$26,$d2
335
336         mov     $h1,%rax
337         mov     $r1,%rdx
338         shl     \$12,%rax
339         shl     \$12,%rdx
340         or      $d1,%rax
341         or      $d2,%rdx
342         and     \$0x3ffffff,%eax
343         and     \$0x3ffffff,%edx
344         mov     %eax,`16*3+0-64`($ctx)
345         lea     (%rax,%rax,4),%eax      # *5
346         mov     %edx,`16*3+4-64`($ctx)
347         lea     (%rdx,%rdx,4),%edx      # *5
348         mov     %eax,`16*4+0-64`($ctx)
349         mov     $h1,$d1
350         mov     %edx,`16*4+4-64`($ctx)
351         mov     $r1,$d2
352
353         mov     \$0x3ffffff,%eax
354         mov     \$0x3ffffff,%edx
355         shr     \$14,$d1
356         shr     \$14,$d2
357         and     $d1#d,%eax
358         and     $d2#d,%edx
359         mov     %eax,`16*5+0-64`($ctx)
360         lea     (%rax,%rax,4),%eax      # *5
361         mov     %edx,`16*5+4-64`($ctx)
362         lea     (%rdx,%rdx,4),%edx      # *5
363         mov     %eax,`16*6+0-64`($ctx)
364         shr     \$26,$d1
365         mov     %edx,`16*6+4-64`($ctx)
366         shr     \$26,$d2
367
368         mov     $h2,%rax
369         shl     \$24,%rax
370         or      %rax,$d1
371         mov     $d1#d,`16*7+0-64`($ctx)
372         lea     ($d1,$d1,4),$d1         # *5
373         mov     $d2#d,`16*7+4-64`($ctx)
374         lea     ($d2,$d2,4),$d2         # *5
375         mov     $d1#d,`16*8+0-64`($ctx)
376         mov     $d2#d,`16*8+4-64`($ctx)
377
378         mov     $r1,%rax
379         call    __poly1305_block        # r^3
380
381         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
382         mov     $h0,$d1
383         and     $h0#d,%eax
384         shr     \$26,$d1
385         mov     %eax,`16*0+12-64`($ctx)
386
387         mov     \$0x3ffffff,%edx
388         and     $d1#d,%edx
389         mov     %edx,`16*1+12-64`($ctx)
390         lea     (%rdx,%rdx,4),%edx      # *5
391         shr     \$26,$d1
392         mov     %edx,`16*2+12-64`($ctx)
393
394         mov     $h1,%rax
395         shl     \$12,%rax
396         or      $d1,%rax
397         and     \$0x3ffffff,%eax
398         mov     %eax,`16*3+12-64`($ctx)
399         lea     (%rax,%rax,4),%eax      # *5
400         mov     $h1,$d1
401         mov     %eax,`16*4+12-64`($ctx)
402
403         mov     \$0x3ffffff,%edx
404         shr     \$14,$d1
405         and     $d1#d,%edx
406         mov     %edx,`16*5+12-64`($ctx)
407         lea     (%rdx,%rdx,4),%edx      # *5
408         shr     \$26,$d1
409         mov     %edx,`16*6+12-64`($ctx)
410
411         mov     $h2,%rax
412         shl     \$24,%rax
413         or      %rax,$d1
414         mov     $d1#d,`16*7+12-64`($ctx)
415         lea     ($d1,$d1,4),$d1         # *5
416         mov     $d1#d,`16*8+12-64`($ctx)
417
418         mov     $r1,%rax
419         call    __poly1305_block        # r^4
420
421         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
422         mov     $h0,$d1
423         and     $h0#d,%eax
424         shr     \$26,$d1
425         mov     %eax,`16*0+8-64`($ctx)
426
427         mov     \$0x3ffffff,%edx
428         and     $d1#d,%edx
429         mov     %edx,`16*1+8-64`($ctx)
430         lea     (%rdx,%rdx,4),%edx      # *5
431         shr     \$26,$d1
432         mov     %edx,`16*2+8-64`($ctx)
433
434         mov     $h1,%rax
435         shl     \$12,%rax
436         or      $d1,%rax
437         and     \$0x3ffffff,%eax
438         mov     %eax,`16*3+8-64`($ctx)
439         lea     (%rax,%rax,4),%eax      # *5
440         mov     $h1,$d1
441         mov     %eax,`16*4+8-64`($ctx)
442
443         mov     \$0x3ffffff,%edx
444         shr     \$14,$d1
445         and     $d1#d,%edx
446         mov     %edx,`16*5+8-64`($ctx)
447         lea     (%rdx,%rdx,4),%edx      # *5
448         shr     \$26,$d1
449         mov     %edx,`16*6+8-64`($ctx)
450
451         mov     $h2,%rax
452         shl     \$24,%rax
453         or      %rax,$d1
454         mov     $d1#d,`16*7+8-64`($ctx)
455         lea     ($d1,$d1,4),$d1         # *5
456         mov     $d1#d,`16*8+8-64`($ctx)
457
458         lea     -48-64($ctx),$ctx       # size [de-]optimization
459         ret
460 .size   __poly1305_init_avx,.-__poly1305_init_avx
461
462 .type   poly1305_blocks_avx,\@function,4
463 .align  32
464 poly1305_blocks_avx:
465         mov     20($ctx),%r8d           # is_base2_26
466         cmp     \$128,$len
467         jae     .Lblocks_avx
468         test    %r8d,%r8d
469         jz      .Lblocks
470
471 .Lblocks_avx:
472         and     \$-16,$len
473         jz      .Lno_data_avx
474
475         vzeroupper
476
477         test    %r8d,%r8d
478         jz      .Lbase2_64_avx
479
480         test    \$31,$len
481         jz      .Leven_avx
482
483         push    %rbx
484         push    %rbp
485         push    %r12
486         push    %r13
487         push    %r14
488         push    %r15
489 .Lblocks_avx_body:
490
491         mov     $len,%r15               # reassign $len
492
493         mov     0($ctx),$d1             # load hash value
494         mov     8($ctx),$d2
495         mov     16($ctx),$h2#d
496
497         mov     24($ctx),$r0            # load r
498         mov     32($ctx),$s1
499
500         ################################# base 2^26 -> base 2^64
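        # the five 26-bit digits stored at 0,4,8,12,16($ctx) are repacked
        # into three 64-bit limbs, h = h0 + h1*2^64 + h2*2^128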
501         mov     $d1#d,$h0#d
502         and     \$`-1*(1<<31)`,$d1
503         mov     $d2,$r1                 # borrow $r1
504         mov     $d2#d,$h1#d
505         and     \$`-1*(1<<31)`,$d2
506
507         shr     \$6,$d1
508         shl     \$52,$r1
509         add     $d1,$h0
510         shr     \$12,$h1
511         shr     \$18,$d2
512         add     $r1,$h0
513         adc     $d2,$h1
514
515         mov     $h2,$d1
516         shl     \$40,$d1
517         shr     \$24,$h2
518         add     $d1,$h1
519         adc     \$0,$h2                 # can be partially reduced...
520
521         mov     \$-4,$d2                # ... so reduce
522         mov     $h2,$d1
523         and     $h2,$d2
524         shr     \$2,$d1
525         and     \$3,$h2
526         add     $d2,$d1                 # =*5
527         add     $d1,$h0
528         adc     \$0,$h1
529         adc     \$0,$h2
530
531         mov     $s1,$r1
532         mov     $s1,%rax
533         shr     \$2,$s1
534         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
535
536         add     0($inp),$h0             # accumulate input
537         adc     8($inp),$h1
538         lea     16($inp),$inp
539         adc     $padbit,$h2
540
541         call    __poly1305_block
542
543         test    $padbit,$padbit         # if $padbit is zero,
544         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
545
546         ################################# base 2^64 -> base 2^26
547         mov     $h0,%rax
548         mov     $h0,%rdx
549         shr     \$52,$h0
550         mov     $h1,$r0
551         mov     $h1,$r1
552         shr     \$26,%rdx
553         and     \$0x3ffffff,%rax        # h[0]
554         shl     \$12,$r0
555         and     \$0x3ffffff,%rdx        # h[1]
556         shr     \$14,$h1
557         or      $r0,$h0
558         shl     \$24,$h2
559         and     \$0x3ffffff,$h0         # h[2]
560         shr     \$40,$r1
561         and     \$0x3ffffff,$h1         # h[3]
562         or      $r1,$h2                 # h[4]
563
564         sub     \$16,%r15
565         jz      .Lstore_base2_26_avx
566
567         vmovd   %rax#d,$H0
568         vmovd   %rdx#d,$H1
569         vmovd   $h0#d,$H2
570         vmovd   $h1#d,$H3
571         vmovd   $h2#d,$H4
572         jmp     .Lproceed_avx
573
574 .align  32
575 .Lstore_base2_64_avx:
576         mov     $h0,0($ctx)
577         mov     $h1,8($ctx)
578         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
579         jmp     .Ldone_avx
580
581 .align  16
582 .Lstore_base2_26_avx:
583         mov     %rax#d,0($ctx)          # store hash value base 2^26
584         mov     %rdx#d,4($ctx)
585         mov     $h0#d,8($ctx)
586         mov     $h1#d,12($ctx)
587         mov     $h2#d,16($ctx)
588 .align  16
589 .Ldone_avx:
590         mov     0(%rsp),%r15
591         mov     8(%rsp),%r14
592         mov     16(%rsp),%r13
593         mov     24(%rsp),%r12
594         mov     32(%rsp),%rbp
595         mov     40(%rsp),%rbx
596         lea     48(%rsp),%rsp
597 .Lno_data_avx:
598 .Lblocks_avx_epilogue:
599         ret
600
601 .align  32
602 .Lbase2_64_avx:
603         push    %rbx
604         push    %rbp
605         push    %r12
606         push    %r13
607         push    %r14
608         push    %r15
609 .Lbase2_64_avx_body:
610
611         mov     $len,%r15               # reassign $len
612
613         mov     24($ctx),$r0            # load r
614         mov     32($ctx),$s1
615
616         mov     0($ctx),$h0             # load hash value
617         mov     8($ctx),$h1
618         mov     16($ctx),$h2#d
619
620         mov     $s1,$r1
621         mov     $s1,%rax
622         shr     \$2,$s1
623         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
624
625         test    \$31,$len
626         jz      .Linit_avx
627
628         add     0($inp),$h0             # accumulate input
629         adc     8($inp),$h1
630         lea     16($inp),$inp
631         adc     $padbit,$h2
632         sub     \$16,%r15
633
634         call    __poly1305_block
635
636 .Linit_avx:
637         ################################# base 2^64 -> base 2^26
638         mov     $h0,%rax
639         mov     $h0,%rdx
640         shr     \$52,$h0
641         mov     $h1,$d1
642         mov     $h1,$d2
643         shr     \$26,%rdx
644         and     \$0x3ffffff,%rax        # h[0]
645         shl     \$12,$d1
646         and     \$0x3ffffff,%rdx        # h[1]
647         shr     \$14,$h1
648         or      $d1,$h0
649         shl     \$24,$h2
650         and     \$0x3ffffff,$h0         # h[2]
651         shr     \$40,$d2
652         and     \$0x3ffffff,$h1         # h[3]
653         or      $d2,$h2                 # h[4]
654
655         vmovd   %rax#d,$H0
656         vmovd   %rdx#d,$H1
657         vmovd   $h0#d,$H2
658         vmovd   $h1#d,$H3
659         vmovd   $h2#d,$H4
660         movl    \$1,20($ctx)            # set is_base2_26
661
662         call    __poly1305_init_avx
663
664 .Lproceed_avx:
665         mov     %r15,$len
666
667         mov     0(%rsp),%r15
668         mov     8(%rsp),%r14
669         mov     16(%rsp),%r13
670         mov     24(%rsp),%r12
671         mov     32(%rsp),%rbp
672         mov     40(%rsp),%rbx
673         lea     48(%rsp),%rax
674         lea     48(%rsp),%rsp
675 .Lbase2_64_avx_epilogue:
676         jmp     .Ldo_avx
677
678 .align  32
679 .Leven_avx:
680         vmovd           4*0($ctx),$H0           # load hash value
681         vmovd           4*1($ctx),$H1
682         vmovd           4*2($ctx),$H2
683         vmovd           4*3($ctx),$H3
684         vmovd           4*4($ctx),$H4
685
686 .Ldo_avx:
687 ___
688 $code.=<<___    if (!$win64);
689         lea             -0x58(%rsp),%r11
690         sub             \$0x178,%rsp
691 ___
692 $code.=<<___    if ($win64);
693         lea             -0xf8(%rsp),%r11
694         sub             \$0x218,%rsp
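        # xmm6-xmm15 are non-volatile in the Win64 ABI and must be
        # preserved across the call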
695         vmovdqa         %xmm6,0x50(%r11)
696         vmovdqa         %xmm7,0x60(%r11)
697         vmovdqa         %xmm8,0x70(%r11)
698         vmovdqa         %xmm9,0x80(%r11)
699         vmovdqa         %xmm10,0x90(%r11)
700         vmovdqa         %xmm11,0xa0(%r11)
701         vmovdqa         %xmm12,0xb0(%r11)
702         vmovdqa         %xmm13,0xc0(%r11)
703         vmovdqa         %xmm14,0xd0(%r11)
704         vmovdqa         %xmm15,0xe0(%r11)
705 .Ldo_avx_body:
706 ___
707 $code.=<<___;
708         sub             \$64,$len
709         lea             -32($inp),%rax
710         cmovc           %rax,$inp
711
712         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
713         lea             `16*3+64`($ctx),$ctx    # size optimization
714         lea             .Lconst(%rip),%rcx
715
716         ################################################################
717         # load input
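        # two 16-byte blocks are loaded, their 64-bit halves regrouped and
        # re-split into five 26-bit limbs per lane; the high limb gets the
        # padbit (bit 128 of each block) OR-ed in unconditionally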
718         vmovdqu         16*2($inp),$T0
719         vmovdqu         16*3($inp),$T1
720         vmovdqa         64(%rcx),$MASK          # .Lmask26
721
722         vpsrldq         \$6,$T0,$T2             # splat input
723         vpsrldq         \$6,$T1,$T3
724         vpunpckhqdq     $T1,$T0,$T4             # 4
725         vpunpcklqdq     $T1,$T0,$T0             # 0:1
726         vpunpcklqdq     $T3,$T2,$T3             # 2:3
727
728         vpsrlq          \$40,$T4,$T4            # 4
729         vpsrlq          \$26,$T0,$T1
730         vpand           $MASK,$T0,$T0           # 0
731         vpsrlq          \$4,$T3,$T2
732         vpand           $MASK,$T1,$T1           # 1
733         vpsrlq          \$30,$T3,$T3
734         vpand           $MASK,$T2,$T2           # 2
735         vpand           $MASK,$T3,$T3           # 3
736         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
737
738         jbe             .Lskip_loop_avx
739
740         # expand and copy pre-calculated table to stack
741         vmovdqu         `16*1-64`($ctx),$D1
742         vmovdqu         `16*2-64`($ctx),$D2
743         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
744         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
745         vmovdqa         $D3,-0x90(%r11)
746         vmovdqa         $D0,0x00(%rsp)
747         vpshufd         \$0xEE,$D1,$D4
748         vmovdqu         `16*3-64`($ctx),$D0
749         vpshufd         \$0x44,$D1,$D1
750         vmovdqa         $D4,-0x80(%r11)
751         vmovdqa         $D1,0x10(%rsp)
752         vpshufd         \$0xEE,$D2,$D3
753         vmovdqu         `16*4-64`($ctx),$D1
754         vpshufd         \$0x44,$D2,$D2
755         vmovdqa         $D3,-0x70(%r11)
756         vmovdqa         $D2,0x20(%rsp)
757         vpshufd         \$0xEE,$D0,$D4
758         vmovdqu         `16*5-64`($ctx),$D2
759         vpshufd         \$0x44,$D0,$D0
760         vmovdqa         $D4,-0x60(%r11)
761         vmovdqa         $D0,0x30(%rsp)
762         vpshufd         \$0xEE,$D1,$D3
763         vmovdqu         `16*6-64`($ctx),$D0
764         vpshufd         \$0x44,$D1,$D1
765         vmovdqa         $D3,-0x50(%r11)
766         vmovdqa         $D1,0x40(%rsp)
767         vpshufd         \$0xEE,$D2,$D4
768         vmovdqu         `16*7-64`($ctx),$D1
769         vpshufd         \$0x44,$D2,$D2
770         vmovdqa         $D4,-0x40(%r11)
771         vmovdqa         $D2,0x50(%rsp)
772         vpshufd         \$0xEE,$D0,$D3
773         vmovdqu         `16*8-64`($ctx),$D2
774         vpshufd         \$0x44,$D0,$D0
775         vmovdqa         $D3,-0x30(%r11)
776         vmovdqa         $D0,0x60(%rsp)
777         vpshufd         \$0xEE,$D1,$D4
778         vpshufd         \$0x44,$D1,$D1
779         vmovdqa         $D4,-0x20(%r11)
780         vmovdqa         $D1,0x70(%rsp)
781         vpshufd         \$0xEE,$D2,$D3
782          vmovdqa        0x00(%rsp),$D4          # preload r0^2
783         vpshufd         \$0x44,$D2,$D2
784         vmovdqa         $D3,-0x10(%r11)
785         vmovdqa         $D2,0x80(%rsp)
786
787         jmp             .Loop_avx
788
789 .align  32
790 .Loop_avx:
791         ################################################################
792         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
793         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
794         #   \___________________/
795         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
796         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
797         #   \___________________/ \____________________/
798         #
799         # Note that we start with inp[2:3]*r^2. This is because it
800         # doesn't depend on the reduction in the previous iteration.
801         ################################################################
802         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
803         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
804         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
805         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
806         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
807         #
808         # though note that $Tx and $Hx are "reversed" in this section,
809         # and $D4 is preloaded with r0^2...
810
811         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
812         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
813           vmovdqa       $H2,0x20(%r11)                          # offload hash
814         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
815          vmovdqa        0x10(%rsp),$H2          # r1^2
816         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
817         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
818
819           vmovdqa       $H0,0x00(%r11)                          #
820         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
821           vmovdqa       $H1,0x10(%r11)                          #
822         vpmuludq        $T3,$H2,$H1             # h3*r1
823         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
824         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
825           vmovdqa       $H3,0x30(%r11)                          #
826         vpmuludq        $T2,$H2,$H0             # h2*r1
827         vpmuludq        $T1,$H2,$H1             # h1*r1
828         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
829          vmovdqa        0x30(%rsp),$H3          # r2^2
830         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
831           vmovdqa       $H4,0x40(%r11)                          #
832         vpmuludq        $T0,$H2,$H2             # h0*r1
833          vpmuludq       $T2,$H3,$H0             # h2*r2
834         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
835
836          vmovdqa        0x40(%rsp),$H4          # s2^2
837         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
838         vpmuludq        $T1,$H3,$H1             # h1*r2
839         vpmuludq        $T0,$H3,$H3             # h0*r2
840         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
841          vmovdqa        0x50(%rsp),$H2          # r3^2
842         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
843         vpmuludq        $T4,$H4,$H0             # h4*s2
844         vpmuludq        $T3,$H4,$H4             # h3*s2
845         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
846          vmovdqa        0x60(%rsp),$H3          # s3^2
847         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
848
849          vmovdqa        0x80(%rsp),$H4          # s4^2
850         vpmuludq        $T1,$H2,$H1             # h1*r3
851         vpmuludq        $T0,$H2,$H2             # h0*r3
852         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
853         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
854         vpmuludq        $T4,$H3,$H0             # h4*s3
855         vpmuludq        $T3,$H3,$H1             # h3*s3
856         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
857          vmovdqu        16*0($inp),$H0                          # load input
858         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
859         vpmuludq        $T2,$H3,$H3             # h2*s3
860          vpmuludq       $T2,$H4,$T2             # h2*s4
861         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
862
863          vmovdqu        16*1($inp),$H1                          #
864         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
865         vpmuludq        $T3,$H4,$T3             # h3*s4
866         vpmuludq        $T4,$H4,$T4             # h4*s4
867          vpsrldq        \$6,$H0,$H2                             # splat input
868         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
869         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
870          vpsrldq        \$6,$H1,$H3                             #
871         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
872         vpmuludq        $T1,$H4,$T0             # h1*s4
873          vpunpckhqdq    $H1,$H0,$H4             # 4
874         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
875          vmovdqa        -0x90(%r11),$T4         # r0^4
876         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
877
878         vpunpcklqdq     $H1,$H0,$H0             # 0:1
879         vpunpcklqdq     $H3,$H2,$H3             # 2:3
880
881         #vpsrlq         \$40,$H4,$H4            # 4
882         vpsrldq         \$`40/8`,$H4,$H4        # 4
883         vpsrlq          \$26,$H0,$H1
884         vpand           $MASK,$H0,$H0           # 0
885         vpsrlq          \$4,$H3,$H2
886         vpand           $MASK,$H1,$H1           # 1
887         vpand           0(%rcx),$H4,$H4         # .Lmask24
888         vpsrlq          \$30,$H3,$H3
889         vpand           $MASK,$H2,$H2           # 2
890         vpand           $MASK,$H3,$H3           # 3
891         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
892
893         vpaddq          0x00(%r11),$H0,$H0      # add hash value
894         vpaddq          0x10(%r11),$H1,$H1
895         vpaddq          0x20(%r11),$H2,$H2
896         vpaddq          0x30(%r11),$H3,$H3
897         vpaddq          0x40(%r11),$H4,$H4
898
899         lea             16*2($inp),%rax
900         lea             16*4($inp),$inp
901         sub             \$64,$len
902         cmovc           %rax,$inp
903
904         ################################################################
905         # Now we accumulate (inp[0:1]+hash)*r^4
906         ################################################################
907         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
908         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
909         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
910         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
911         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
912
913         vpmuludq        $H0,$T4,$T0             # h0*r0
914         vpmuludq        $H1,$T4,$T1             # h1*r0
915         vpaddq          $T0,$D0,$D0
916         vpaddq          $T1,$D1,$D1
917          vmovdqa        -0x80(%r11),$T2         # r1^4
918         vpmuludq        $H2,$T4,$T0             # h2*r0
919         vpmuludq        $H3,$T4,$T1             # h3*r0
920         vpaddq          $T0,$D2,$D2
921         vpaddq          $T1,$D3,$D3
922         vpmuludq        $H4,$T4,$T4             # h4*r0
923          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
924         vpaddq          $T4,$D4,$D4
925
926         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
927         vpmuludq        $H2,$T2,$T1             # h2*r1
928         vpmuludq        $H3,$T2,$T0             # h3*r1
929         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
930          vmovdqa        -0x60(%r11),$T3         # r2^4
931         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
932         vpmuludq        $H1,$T2,$T1             # h1*r1
933         vpmuludq        $H0,$T2,$T2             # h0*r1
934         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
935         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
936
937          vmovdqa        -0x50(%r11),$T4         # s2^4
938         vpmuludq        $H2,$T3,$T0             # h2*r2
939         vpmuludq        $H1,$T3,$T1             # h1*r2
940         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
941         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
942          vmovdqa        -0x40(%r11),$T2         # r3^4
943         vpmuludq        $H0,$T3,$T3             # h0*r2
944         vpmuludq        $H4,$T4,$T0             # h4*s2
945         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
946         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
947          vmovdqa        -0x30(%r11),$T3         # s3^4
948         vpmuludq        $H3,$T4,$T4             # h3*s2
949          vpmuludq       $H1,$T2,$T1             # h1*r3
950         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
951
952          vmovdqa        -0x10(%r11),$T4         # s4^4
953         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
954         vpmuludq        $H0,$T2,$T2             # h0*r3
955         vpmuludq        $H4,$T3,$T0             # h4*s3
956         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
957         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
958          vmovdqu        16*2($inp),$T0                          # load input
959         vpmuludq        $H3,$T3,$T2             # h3*s3
960         vpmuludq        $H2,$T3,$T3             # h2*s3
961         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
962          vmovdqu        16*3($inp),$T1                          #
963         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
964
965         vpmuludq        $H2,$T4,$H2             # h2*s4
966         vpmuludq        $H3,$T4,$H3             # h3*s4
967          vpsrldq        \$6,$T0,$T2                             # splat input
968         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
969         vpmuludq        $H4,$T4,$H4             # h4*s4
970          vpsrldq        \$6,$T1,$T3                             #
971         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
972         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
973         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
974         vpmuludq        $H1,$T4,$H0
975          vpunpckhqdq    $T1,$T0,$T4             # 4
976         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
977         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
978
979         vpunpcklqdq     $T1,$T0,$T0             # 0:1
980         vpunpcklqdq     $T3,$T2,$T3             # 2:3
981
982         #vpsrlq         \$40,$T4,$T4            # 4
983         vpsrldq         \$`40/8`,$T4,$T4        # 4
984         vpsrlq          \$26,$T0,$T1
985          vmovdqa        0x00(%rsp),$D4          # preload r0^2
986         vpand           $MASK,$T0,$T0           # 0
987         vpsrlq          \$4,$T3,$T2
988         vpand           $MASK,$T1,$T1           # 1
989         vpand           0(%rcx),$T4,$T4         # .Lmask24
990         vpsrlq          \$30,$T3,$T3
991         vpand           $MASK,$T2,$T2           # 2
992         vpand           $MASK,$T3,$T3           # 3
993         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
994
995         ################################################################
996         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
997         # and P. Schwabe
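        # each limb is carried just enough to fit back under 2^26 plus a
        # little slack rather than fully normalized; the h4 -> h0 step
        # multiplies the carry by 5 since 2^130 = 5 mod p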
998
999         vpsrlq          \$26,$H3,$D3
1000         vpand           $MASK,$H3,$H3
1001         vpaddq          $D3,$H4,$H4             # h3 -> h4
1002
1003         vpsrlq          \$26,$H0,$D0
1004         vpand           $MASK,$H0,$H0
1005         vpaddq          $D0,$D1,$H1             # h0 -> h1
1006
1007         vpsrlq          \$26,$H4,$D0
1008         vpand           $MASK,$H4,$H4
1009
1010         vpsrlq          \$26,$H1,$D1
1011         vpand           $MASK,$H1,$H1
1012         vpaddq          $D1,$H2,$H2             # h1 -> h2
1013
1014         vpaddq          $D0,$H0,$H0
1015         vpsllq          \$2,$D0,$D0
1016         vpaddq          $D0,$H0,$H0             # h4 -> h0
1017
1018         vpsrlq          \$26,$H2,$D2
1019         vpand           $MASK,$H2,$H2
1020         vpaddq          $D2,$H3,$H3             # h2 -> h3
1021
1022         vpsrlq          \$26,$H0,$D0
1023         vpand           $MASK,$H0,$H0
1024         vpaddq          $D0,$H1,$H1             # h0 -> h1
1025
1026         vpsrlq          \$26,$H3,$D3
1027         vpand           $MASK,$H3,$H3
1028         vpaddq          $D3,$H4,$H4             # h3 -> h4
1029
1030         ja              .Loop_avx
1031
1032 .Lskip_loop_avx:
1033         ################################################################
1034         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1035
1036         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1037         add             \$32,$len
1038         jnz             .Long_tail_avx
1039
1040         vpaddq          $H2,$T2,$T2
1041         vpaddq          $H0,$T0,$T0
1042         vpaddq          $H1,$T1,$T1
1043         vpaddq          $H3,$T3,$T3
1044         vpaddq          $H4,$T4,$T4
1045
1046 .Long_tail_avx:
1047         vmovdqa         $H2,0x20(%r11)
1048         vmovdqa         $H0,0x00(%r11)
1049         vmovdqa         $H1,0x10(%r11)
1050         vmovdqa         $H3,0x30(%r11)
1051         vmovdqa         $H4,0x40(%r11)
1052
1053         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1054         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1055         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1056         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1057         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1058
1059         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1060         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1061          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1062         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1063         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1064         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1065
1066         vpmuludq        $T3,$H2,$H0             # h3*r1
1067         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1068          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1069         vpmuludq        $T2,$H2,$H1             # h2*r1
1070         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1071          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1072         vpmuludq        $T1,$H2,$H0             # h1*r1
1073         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1074         vpmuludq        $T0,$H2,$H2             # h0*r1
1075         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1076         vpmuludq        $T4,$H3,$H3             # h4*s1
1077         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1078
1079          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1080         vpmuludq        $T2,$H4,$H1             # h2*r2
1081         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1082         vpmuludq        $T1,$H4,$H0             # h1*r2
1083         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1084          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1085         vpmuludq        $T0,$H4,$H4             # h0*r2
1086         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1087         vpmuludq        $T4,$H2,$H1             # h4*s2
1088         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1089          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1090         vpmuludq        $T3,$H2,$H2             # h3*s2
1091         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1092
1093         vpmuludq        $T1,$H3,$H0             # h1*r3
1094         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1095         vpmuludq        $T0,$H3,$H3             # h0*r3
1096         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1097          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1098         vpmuludq        $T4,$H4,$H1             # h4*s3
1099         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1100          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1101         vpmuludq        $T3,$H4,$H0             # h3*s3
1102         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1103         vpmuludq        $T2,$H4,$H4             # h2*s3
1104         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1105
1106         vpmuludq        $T0,$H2,$H2             # h0*r4
1107         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1108         vpmuludq        $T4,$H3,$H1             # h4*s4
1109         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1110         vpmuludq        $T3,$H3,$H0             # h3*s4
1111         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1112         vpmuludq        $T2,$H3,$H1             # h2*s4
1113         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1114         vpmuludq        $T1,$H3,$H3             # h1*s4
1115         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1116
1117         jz              .Lshort_tail_avx
1118
1119         vmovdqu         16*0($inp),$H0          # load input
1120         vmovdqu         16*1($inp),$H1
1121
1122         vpsrldq         \$6,$H0,$H2             # splat input
1123         vpsrldq         \$6,$H1,$H3
1124         vpunpckhqdq     $H1,$H0,$H4             # 4
1125         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1126         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1127
1128         vpsrlq          \$40,$H4,$H4            # 4
1129         vpsrlq          \$26,$H0,$H1
1130         vpand           $MASK,$H0,$H0           # 0
1131         vpsrlq          \$4,$H3,$H2
1132         vpand           $MASK,$H1,$H1           # 1
1133         vpsrlq          \$30,$H3,$H3
1134         vpand           $MASK,$H2,$H2           # 2
1135         vpand           $MASK,$H3,$H3           # 3
1136         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1137
1138         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1139         vpaddq          0x00(%r11),$H0,$H0
1140         vpaddq          0x10(%r11),$H1,$H1
1141         vpaddq          0x20(%r11),$H2,$H2
1142         vpaddq          0x30(%r11),$H3,$H3
1143         vpaddq          0x40(%r11),$H4,$H4
1144
1145         ################################################################
1146         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1147
1148         vpmuludq        $H0,$T4,$T0             # h0*r0
1149         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1150         vpmuludq        $H1,$T4,$T1             # h1*r0
1151         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1152         vpmuludq        $H2,$T4,$T0             # h2*r0
1153         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1154          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1155         vpmuludq        $H3,$T4,$T1             # h3*r0
1156         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1157         vpmuludq        $H4,$T4,$T4             # h4*r0
1158         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1159
1160         vpmuludq        $H3,$T2,$T0             # h3*r1
1161         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1162          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1163         vpmuludq        $H2,$T2,$T1             # h2*r1
1164         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1165          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1166         vpmuludq        $H1,$T2,$T0             # h1*r1
1167         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1168         vpmuludq        $H0,$T2,$T2             # h0*r1
1169         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1170         vpmuludq        $H4,$T3,$T3             # h4*s1
1171         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1172
1173          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1174         vpmuludq        $H2,$T4,$T1             # h2*r2
1175         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1176         vpmuludq        $H1,$T4,$T0             # h1*r2
1177         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1178          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1179         vpmuludq        $H0,$T4,$T4             # h0*r2
1180         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1181         vpmuludq        $H4,$T2,$T1             # h4*s2
1182         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1183          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1184         vpmuludq        $H3,$T2,$T2             # h3*s2
1185         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1186
1187         vpmuludq        $H1,$T3,$T0             # h1*r3
1188         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1189         vpmuludq        $H0,$T3,$T3             # h0*r3
1190         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1191          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1192         vpmuludq        $H4,$T4,$T1             # h4*s3
1193         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1194          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1195         vpmuludq        $H3,$T4,$T0             # h3*s3
1196         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1197         vpmuludq        $H2,$T4,$T4             # h2*s3
1198         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1199
1200         vpmuludq        $H0,$T2,$T2             # h0*r4
1201         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1202         vpmuludq        $H4,$T3,$T1             # h4*s4
1203         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1204         vpmuludq        $H3,$T3,$T0             # h3*s4
1205         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1206         vpmuludq        $H2,$T3,$T1             # h2*s4
1207         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1208         vpmuludq        $H1,$T3,$T3             # h1*s4
1209         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1210
1211 .Lshort_tail_avx:
1212         ################################################################
1213         # horizontal addition
1214
1215         vpsrldq         \$8,$D4,$T4
1216         vpsrldq         \$8,$D3,$T3
1217         vpsrldq         \$8,$D1,$T1
1218         vpsrldq         \$8,$D0,$T0
1219         vpsrldq         \$8,$D2,$T2
1220         vpaddq          $T3,$D3,$D3
1221         vpaddq          $T4,$D4,$D4
1222         vpaddq          $T0,$D0,$D0
1223         vpaddq          $T1,$D1,$D1
1224         vpaddq          $T2,$D2,$D2
1225
1226         ################################################################
1227         # lazy reduction
1228
1229         vpsrlq          \$26,$D3,$H3
1230         vpand           $MASK,$D3,$D3
1231         vpaddq          $H3,$D4,$D4             # h3 -> h4
1232
1233         vpsrlq          \$26,$D0,$H0
1234         vpand           $MASK,$D0,$D0
1235         vpaddq          $H0,$D1,$D1             # h0 -> h1
1236
1237         vpsrlq          \$26,$D4,$H4
1238         vpand           $MASK,$D4,$D4
1239
1240         vpsrlq          \$26,$D1,$H1
1241         vpand           $MASK,$D1,$D1
1242         vpaddq          $H1,$D2,$D2             # h1 -> h2
1243
1244         vpaddq          $H4,$D0,$D0
1245         vpsllq          \$2,$H4,$H4
1246         vpaddq          $H4,$D0,$D0             # h4 -> h0
1247
1248         vpsrlq          \$26,$D2,$H2
1249         vpand           $MASK,$D2,$D2
1250         vpaddq          $H2,$D3,$D3             # h2 -> h3
1251
1252         vpsrlq          \$26,$D0,$H0
1253         vpand           $MASK,$D0,$D0
1254         vpaddq          $H0,$D1,$D1             # h0 -> h1
1255
1256         vpsrlq          \$26,$D3,$H3
1257         vpand           $MASK,$D3,$D3
1258         vpaddq          $H3,$D4,$D4             # h3 -> h4
1259
1260         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1261         vmovd           $D1,`4*1-48-64`($ctx)
1262         vmovd           $D2,`4*2-48-64`($ctx)
1263         vmovd           $D3,`4*3-48-64`($ctx)
1264         vmovd           $D4,`4*4-48-64`($ctx)
1265 ___
1266 $code.=<<___    if ($win64);
1267         vmovdqa         0x50(%r11),%xmm6
1268         vmovdqa         0x60(%r11),%xmm7
1269         vmovdqa         0x70(%r11),%xmm8
1270         vmovdqa         0x80(%r11),%xmm9
1271         vmovdqa         0x90(%r11),%xmm10
1272         vmovdqa         0xa0(%r11),%xmm11
1273         vmovdqa         0xb0(%r11),%xmm12
1274         vmovdqa         0xc0(%r11),%xmm13
1275         vmovdqa         0xd0(%r11),%xmm14
1276         vmovdqa         0xe0(%r11),%xmm15
1277         lea             0xf8(%r11),%rsp
1278 .Ldo_avx_epilogue:
1279 ___
1280 $code.=<<___    if (!$win64);
1281         lea             0x58(%r11),%rsp
1282 ___
1283 $code.=<<___;
1284         vzeroupper
1285         ret
1286 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1287
1288 .type   poly1305_emit_avx,\@function,3
1289 .align  32
1290 poly1305_emit_avx:
1291         cmpl    \$0,20($ctx)    # is_base2_26?
1292         je      .Lemit
1293
1294         mov     0($ctx),%eax    # load hash value base 2^26
1295         mov     4($ctx),%ecx
1296         mov     8($ctx),%r8d
1297         mov     12($ctx),%r11d
1298         mov     16($ctx),%r10d
1299
1300         shl     \$26,%rcx       # base 2^26 -> base 2^64
1301         mov     %r8,%r9
1302         shl     \$52,%r8
1303         add     %rcx,%rax
1304         shr     \$12,%r9
1305         add     %rax,%r8        # h0
1306         adc     \$0,%r9
1307
1308         shl     \$14,%r11
1309         mov     %r10,%rax
1310         shr     \$24,%r10
1311         add     %r11,%r9
1312         shl     \$40,%rax
1313         add     %rax,%r9        # h1
1314         adc     \$0,%r10        # h2
1315
1316         mov     %r10,%rax       # could be partially reduced, so reduce
1317         mov     %r10,%rcx
1318         and     \$3,%r10
1319         shr     \$2,%rax
1320         and     \$-4,%rcx
1321         add     %rcx,%rax
1322         add     %rax,%r8
1323         adc     \$0,%r9
1324         adc     \$0,%r10
1325
1326         mov     %r8,%rax
1327         add     \$5,%r8         # compare to modulus
1328         mov     %r9,%rcx
1329         adc     \$0,%r9
1330         adc     \$0,%r10
1331         shr     \$2,%r10        # did 130-bit value overflow?
1332         cmovnz  %r8,%rax
1333         cmovnz  %r9,%rcx
1334
1335         add     0($nonce),%rax  # accumulate nonce
1336         adc     8($nonce),%rcx
1337         mov     %rax,0($mac)    # write result
1338         mov     %rcx,8($mac)
1339
1340         ret
1341 .size   poly1305_emit_avx,.-poly1305_emit_avx
1342 ___
1343
1344 if ($avx>1) {
1345 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1346     map("%ymm$_",(0..15));
1347 my $S4=$MASK;
1348
1349 $code.=<<___;
1350 .type   poly1305_blocks_avx2,\@function,4
1351 .align  32
1352 poly1305_blocks_avx2:
1353         mov     20($ctx),%r8d           # is_base2_26
1354         cmp     \$128,$len
1355         jae     .Lblocks_avx2
1356         test    %r8d,%r8d
1357         jz      .Lblocks
1358
1359 .Lblocks_avx2:
1360         and     \$-16,$len
1361         jz      .Lno_data_avx2
1362
1363         vzeroupper
1364
1365         test    %r8d,%r8d
1366         jz      .Lbase2_64_avx2
1367
1368         test    \$63,$len
1369         jz      .Leven_avx2
1370
1371         push    %rbx
1372         push    %rbp
1373         push    %r12
1374         push    %r13
1375         push    %r14
1376         push    %r15
1377 .Lblocks_avx2_body:
1378
1379         mov     $len,%r15               # reassign $len
1380
1381         mov     0($ctx),$d1             # load hash value
1382         mov     8($ctx),$d2
1383         mov     16($ctx),$h2#d
1384
1385         mov     24($ctx),$r0            # load r
1386         mov     32($ctx),$s1
1387
1388         ################################# base 2^26 -> base 2^64
1389         mov     $d1#d,$h0#d
1390         and     \$`-1*(1<<31)`,$d1
1391         mov     $d2,$r1                 # borrow $r1
1392         mov     $d2#d,$h1#d
1393         and     \$`-1*(1<<31)`,$d2
1394
1395         shr     \$6,$d1
1396         shl     \$52,$r1
1397         add     $d1,$h0
1398         shr     \$12,$h1
1399         shr     \$18,$d2
1400         add     $r1,$h0
1401         adc     $d2,$h1
1402
1403         mov     $h2,$d1
1404         shl     \$40,$d1
1405         shr     \$24,$h2
1406         add     $d1,$h1
1407         adc     \$0,$h2                 # can be partially reduced...
1408
1409         mov     \$-4,$d2                # ... so reduce
1410         mov     $h2,$d1
1411         and     $h2,$d2
1412         shr     \$2,$d1
1413         and     \$3,$h2
1414         add     $d2,$d1                 # =*5
1415         add     $d1,$h0
1416         adc     \$0,$h1
1417         adc     \$0,$h2
1418
1419         mov     $s1,$r1
1420         mov     $s1,%rax
1421         shr     \$2,$s1
1422         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1423
1424 .Lbase2_26_pre_avx2:
1425         add     0($inp),$h0             # accumulate input
1426         adc     8($inp),$h1
1427         lea     16($inp),$inp
1428         adc     $padbit,$h2
1429         sub     \$16,%r15
1430
1431         call    __poly1305_block
1432         mov     $r1,%rax
1433
1434         test    \$63,%r15
1435         jnz     .Lbase2_26_pre_avx2
1436
1437         test    $padbit,$padbit         # if $padbit is zero,
1438         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1439
1440         ################################# base 2^64 -> base 2^26
1441         mov     $h0,%rax
1442         mov     $h0,%rdx
1443         shr     \$52,$h0
1444         mov     $h1,$r0
1445         mov     $h1,$r1
1446         shr     \$26,%rdx
1447         and     \$0x3ffffff,%rax        # h[0]
1448         shl     \$12,$r0
1449         and     \$0x3ffffff,%rdx        # h[1]
1450         shr     \$14,$h1
1451         or      $r0,$h0
1452         shl     \$24,$h2
1453         and     \$0x3ffffff,$h0         # h[2]
1454         shr     \$40,$r1
1455         and     \$0x3ffffff,$h1         # h[3]
1456         or      $r1,$h2                 # h[4]
1457
1458         test    %r15,%r15
1459         jz      .Lstore_base2_26_avx2
1460
1461         vmovd   %rax#d,%x#$H0
1462         vmovd   %rdx#d,%x#$H1
1463         vmovd   $h0#d,%x#$H2
1464         vmovd   $h1#d,%x#$H3
1465         vmovd   $h2#d,%x#$H4
1466         jmp     .Lproceed_avx2
1467
1468 .align  32
1469 .Lstore_base2_64_avx2:
1470         mov     $h0,0($ctx)
1471         mov     $h1,8($ctx)
1472         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1473         jmp     .Ldone_avx2
1474
1475 .align  16
1476 .Lstore_base2_26_avx2:
1477         mov     %rax#d,0($ctx)          # store hash value base 2^26
1478         mov     %rdx#d,4($ctx)
1479         mov     $h0#d,8($ctx)
1480         mov     $h1#d,12($ctx)
1481         mov     $h2#d,16($ctx)
1482 .align  16
1483 .Ldone_avx2:
1484         mov     0(%rsp),%r15
1485         mov     8(%rsp),%r14
1486         mov     16(%rsp),%r13
1487         mov     24(%rsp),%r12
1488         mov     32(%rsp),%rbp
1489         mov     40(%rsp),%rbx
1490         lea     48(%rsp),%rsp
1491 .Lno_data_avx2:
1492 .Lblocks_avx2_epilogue:
1493         ret
1494
1495 .align  32
1496 .Lbase2_64_avx2:
1497         push    %rbx
1498         push    %rbp
1499         push    %r12
1500         push    %r13
1501         push    %r14
1502         push    %r15
1503 .Lbase2_64_avx2_body:
1504
1505         mov     $len,%r15               # reassign $len
1506
1507         mov     24($ctx),$r0            # load r
1508         mov     32($ctx),$s1
1509
1510         mov     0($ctx),$h0             # load hash value
1511         mov     8($ctx),$h1
1512         mov     16($ctx),$h2#d
1513
1514         mov     $s1,$r1
1515         mov     $s1,%rax
1516         shr     \$2,$s1
1517         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1518
1519         test    \$63,$len
1520         jz      .Linit_avx2
1521
1522 .Lbase2_64_pre_avx2:
1523         add     0($inp),$h0             # accumulate input
1524         adc     8($inp),$h1
1525         lea     16($inp),$inp
1526         adc     $padbit,$h2
1527         sub     \$16,%r15
1528
1529         call    __poly1305_block
1530         mov     $r1,%rax
1531
1532         test    \$63,%r15
1533         jnz     .Lbase2_64_pre_avx2
1534
1535 .Linit_avx2:
1536         ################################# base 2^64 -> base 2^26
1537         mov     $h0,%rax
1538         mov     $h0,%rdx
1539         shr     \$52,$h0
1540         mov     $h1,$d1
1541         mov     $h1,$d2
1542         shr     \$26,%rdx
1543         and     \$0x3ffffff,%rax        # h[0]
1544         shl     \$12,$d1
1545         and     \$0x3ffffff,%rdx        # h[1]
1546         shr     \$14,$h1
1547         or      $d1,$h0
1548         shl     \$24,$h2
1549         and     \$0x3ffffff,$h0         # h[2]
1550         shr     \$40,$d2
1551         and     \$0x3ffffff,$h1         # h[3]
1552         or      $d2,$h2                 # h[4]
1553
1554         vmovd   %rax#d,%x#$H0
1555         vmovd   %rdx#d,%x#$H1
1556         vmovd   $h0#d,%x#$H2
1557         vmovd   $h1#d,%x#$H3
1558         vmovd   $h2#d,%x#$H4
1559         movl    \$1,20($ctx)            # set is_base2_26
1560
1561         call    __poly1305_init_avx
1562
1563 .Lproceed_avx2:
1564         mov     %r15,$len
1565
1566         mov     0(%rsp),%r15
1567         mov     8(%rsp),%r14
1568         mov     16(%rsp),%r13
1569         mov     24(%rsp),%r12
1570         mov     32(%rsp),%rbp
1571         mov     40(%rsp),%rbx
1572         lea     48(%rsp),%rax
1573         lea     48(%rsp),%rsp
1574 .Lbase2_64_avx2_epilogue:
1575         jmp     .Ldo_avx2
1576
1577 .align  32
1578 .Leven_avx2:
1579         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1580         vmovd           4*1($ctx),%x#$H1
1581         vmovd           4*2($ctx),%x#$H2
1582         vmovd           4*3($ctx),%x#$H3
1583         vmovd           4*4($ctx),%x#$H4
1584
1585 .Ldo_avx2:
1586 ___
1587 $code.=<<___    if (!$win64);
1588         lea             -8(%rsp),%r11
1589         sub             \$0x128,%rsp
1590 ___
1591 $code.=<<___    if ($win64);
1592         lea             -0xf8(%rsp),%r11
1593         sub             \$0x1c8,%rsp
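        # xmm6-xmm15 are non-volatile under the Win64 ABI and have to be
        # preserved across the call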
1594         vmovdqa         %xmm6,0x50(%r11)
1595         vmovdqa         %xmm7,0x60(%r11)
1596         vmovdqa         %xmm8,0x70(%r11)
1597         vmovdqa         %xmm9,0x80(%r11)
1598         vmovdqa         %xmm10,0x90(%r11)
1599         vmovdqa         %xmm11,0xa0(%r11)
1600         vmovdqa         %xmm12,0xb0(%r11)
1601         vmovdqa         %xmm13,0xc0(%r11)
1602         vmovdqa         %xmm14,0xd0(%r11)
1603         vmovdqa         %xmm15,0xe0(%r11)
1604 .Ldo_avx2_body:
1605 ___
1606 $code.=<<___;
1607         lea             48+64($ctx),$ctx        # size optimization
1608         lea             .Lconst(%rip),%rcx
1609
1610         # expand and copy pre-calculated table to stack
1611         vmovdqu         `16*0-64`($ctx),%x#$T2
1612         and             \$-512,%rsp
1613         vmovdqu         `16*1-64`($ctx),%x#$T3
1614         vmovdqu         `16*2-64`($ctx),%x#$T4
1615         vmovdqu         `16*3-64`($ctx),%x#$D0
1616         vmovdqu         `16*4-64`($ctx),%x#$D1
1617         vmovdqu         `16*5-64`($ctx),%x#$D2
1618         vmovdqu         `16*6-64`($ctx),%x#$D3
1619         vpermq          \$0x15,$T2,$T2          # 00003412 -> 12343434
1620         vmovdqu         `16*7-64`($ctx),%x#$D4
1621         vpermq          \$0x15,$T3,$T3
1622         vpshufd         \$0xc8,$T2,$T2          # 12343434 -> 14243444
1623         vmovdqu         `16*8-64`($ctx),%x#$MASK
1624         vpermq          \$0x15,$T4,$T4
1625         vpshufd         \$0xc8,$T3,$T3
1626         vmovdqa         $T2,0x00(%rsp)
1627         vpermq          \$0x15,$D0,$D0
1628         vpshufd         \$0xc8,$T4,$T4
1629         vmovdqa         $T3,0x20(%rsp)
1630         vpermq          \$0x15,$D1,$D1
1631         vpshufd         \$0xc8,$D0,$D0
1632         vmovdqa         $T4,0x40(%rsp)
1633         vpermq          \$0x15,$D2,$D2
1634         vpshufd         \$0xc8,$D1,$D1
1635         vmovdqa         $D0,0x60(%rsp)
1636         vpermq          \$0x15,$D3,$D3
1637         vpshufd         \$0xc8,$D2,$D2
1638         vmovdqa         $D1,0x80(%rsp)
1639         vpermq          \$0x15,$D4,$D4
1640         vpshufd         \$0xc8,$D3,$D3
1641         vmovdqa         $D2,0xa0(%rsp)
1642         vpermq          \$0x15,$MASK,$MASK
1643         vpshufd         \$0xc8,$D4,$D4
1644         vmovdqa         $D3,0xc0(%rsp)
1645         vpshufd         \$0xc8,$MASK,$MASK
1646         vmovdqa         $D4,0xe0(%rsp)
1647         vmovdqa         $MASK,0x100(%rsp)
1648         vmovdqa         64(%rcx),$MASK          # .Lmask26
1649
1650         ################################################################
1651         # load input
1652         vmovdqu         16*0($inp),%x#$T0
1653         vmovdqu         16*1($inp),%x#$T1
1654         vinserti128     \$1,16*2($inp),$T0,$T0
1655         vinserti128     \$1,16*3($inp),$T1,$T1
1656         lea             16*4($inp),$inp
1657
1658         vpsrldq         \$6,$T0,$T2             # splat input
1659         vpsrldq         \$6,$T1,$T3
1660         vpunpckhqdq     $T1,$T0,$T4             # 4
1661         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1662         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1663
1664         vpsrlq          \$30,$T2,$T3
1665         vpsrlq          \$4,$T2,$T2
1666         vpsrlq          \$26,$T0,$T1
1667         vpsrlq          \$40,$T4,$T4            # 4
1668         vpand           $MASK,$T2,$T2           # 2
1669         vpand           $MASK,$T0,$T0           # 0
1670         vpand           $MASK,$T1,$T1           # 1
1671         vpand           $MASK,$T3,$T3           # 3
1672         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
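        # the 2^128 bit that pads every full 16-byte block lands at bit 24
        # of the top 26-bit digit, which is what .L129 (1<<24) supplies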
1673
1674         lea             0x90(%rsp),%rax         # size optimization
1675         vpaddq          $H2,$T2,$H2             # accumulate input
1676         sub             \$64,$len
1677         jz              .Ltail_avx2
1678         jmp             .Loop_avx2
1679
1680 .align  32
1681 .Loop_avx2:
1682         ################################################################
1683         # ((inp[0]*r^4+inp[4])*r^4+inp[8])*r^4
1684         # ((inp[1]*r^4+inp[5])*r^4+inp[9])*r^3
1685         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1686         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1687         #   \________/\________/
1688         ################################################################
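        # i.e. four blocks are processed per iteration: the main loop
        # multiplies every lane by r^4, and the tail applies the final
        # r^4, r^3, r^2, r^1 so the four lane sums can simply be added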
1689         #vpaddq         $H2,$T2,$H2             # accumulate input
1690         vpaddq          $H0,$T0,$H0
1691         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1692         vpaddq          $H1,$T1,$H1
1693         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1694         vpaddq          $H3,$T3,$H3
1695         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1696         vpaddq          $H4,$T4,$H4
1697         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1698         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1699
1700         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1701         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1702         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1703         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1704         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1705         #
1706         # however, as h2 is "chronologically" the first one available, pull
1707         # the corresponding operations up, so it becomes
1708         #
1709         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1710         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1711         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1712         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1713         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
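        #
        # (the 5*r factors are where digit products that overflow 2^130
        #  wrap around; s1..s4 = 5*r1..5*r4 come precomputed in the table)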
1714
1715         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1716         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1717         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1718         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1719         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1720
1721         vpmuludq        $H0,$T1,$T4             # h0*r1
1722         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1723         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1724         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1725         vpmuludq        $H3,$T1,$T4             # h3*r1
1726         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1727         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1728         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1729          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1730
1731         vpmuludq        $H0,$T0,$T4             # h0*r0
1732         vpmuludq        $H1,$T0,$H2             # h1*r0
1733         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1734         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1735         vpmuludq        $H3,$T0,$T4             # h3*r0
1736         vpmuludq        $H4,$T0,$H2             # h4*r0
1737          vmovdqu        16*0($inp),%x#$T0       # load input
1738         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1739         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1740          vinserti128    \$1,16*2($inp),$T0,$T0
1741
1742         vpmuludq        $H3,$T1,$T4             # h3*s2
1743         vpmuludq        $H4,$T1,$H2             # h4*s2
1744          vmovdqu        16*1($inp),%x#$T1
1745         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1746         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1747          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1748         vpmuludq        $H1,$T2,$T4             # h1*r2
1749         vpmuludq        $H0,$T2,$T2             # h0*r2
1750         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1751         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1752          vinserti128    \$1,16*3($inp),$T1,$T1
1753          lea            16*4($inp),$inp
1754
1755         vpmuludq        $H1,$H2,$T4             # h1*r3
1756         vpmuludq        $H0,$H2,$H2             # h0*r3
1757          vpsrldq        \$6,$T0,$T2             # splat input
1758         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1759         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1760         vpmuludq        $H3,$T3,$T4             # h3*s3
1761         vpmuludq        $H4,$T3,$H2             # h4*s3
1762          vpsrldq        \$6,$T1,$T3
1763         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1764         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1765          vpunpckhqdq    $T1,$T0,$T4             # 4
1766
1767         vpmuludq        $H3,$S4,$H3             # h3*s4
1768         vpmuludq        $H4,$S4,$H4             # h4*s4
1769          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1770         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1771         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1772          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1773         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1774         vpmuludq        $H1,$S4,$H0             # h1*s4
1775         vmovdqa         64(%rcx),$MASK          # .Lmask26
1776         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1777         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1778
1779         ################################################################
1780         # lazy reduction (interleaved with tail of input splat)
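        # each digit's excess above 26 bits is carried into the next one;
        # the carry out of h4 wraps into h0 multiplied by 5 (add plus
        # shift-left-by-2), again because 2^130 = 5 mod p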
1781
1782         vpsrlq          \$26,$H3,$D3
1783         vpand           $MASK,$H3,$H3
1784         vpaddq          $D3,$H4,$H4             # h3 -> h4
1785
1786         vpsrlq          \$26,$H0,$D0
1787         vpand           $MASK,$H0,$H0
1788         vpaddq          $D0,$D1,$H1             # h0 -> h1
1789
1790         vpsrlq          \$26,$H4,$D4
1791         vpand           $MASK,$H4,$H4
1792
1793          vpsrlq         \$4,$T3,$T2
1794
1795         vpsrlq          \$26,$H1,$D1
1796         vpand           $MASK,$H1,$H1
1797         vpaddq          $D1,$H2,$H2             # h1 -> h2
1798
1799         vpaddq          $D4,$H0,$H0
1800         vpsllq          \$2,$D4,$D4
1801         vpaddq          $D4,$H0,$H0             # h4 -> h0
1802
1803          vpand          $MASK,$T2,$T2           # 2
1804          vpsrlq         \$26,$T0,$T1
1805
1806         vpsrlq          \$26,$H2,$D2
1807         vpand           $MASK,$H2,$H2
1808         vpaddq          $D2,$H3,$H3             # h2 -> h3
1809
1810          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1811          vpsrlq         \$30,$T3,$T3
1812
1813         vpsrlq          \$26,$H0,$D0
1814         vpand           $MASK,$H0,$H0
1815         vpaddq          $D0,$H1,$H1             # h0 -> h1
1816
1817          vpsrlq         \$40,$T4,$T4            # 4
1818
1819         vpsrlq          \$26,$H3,$D3
1820         vpand           $MASK,$H3,$H3
1821         vpaddq          $D3,$H4,$H4             # h3 -> h4
1822
1823          vpand          $MASK,$T0,$T0           # 0
1824          vpand          $MASK,$T1,$T1           # 1
1825          vpand          $MASK,$T3,$T3           # 3
1826          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1827
1828         sub             \$64,$len
1829         jnz             .Loop_avx2
1830
1831         .byte           0x66,0x90               # 2-byte nop
1832 .Ltail_avx2:
1833         ################################################################
1834         # while the above multiplications were by r^4 in all lanes, in the
1835         # last iteration we multiply the least significant lane by r^4 and
1836         # the most significant one by r, so this is a copy of the above,
1837         # except that references to the precomputed table are displaced by 4...
1838
1839         #vpaddq         $H2,$T2,$H2             # accumulate input
1840         vpaddq          $H0,$T0,$H0
1841         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1842         vpaddq          $H1,$T1,$H1
1843         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1844         vpaddq          $H3,$T3,$H3
1845         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1846         vpaddq          $H4,$T4,$H4
1847         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1848         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1849
1850         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1851         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1852         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1853         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1854         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1855
1856         vpmuludq        $H0,$T1,$T4             # h0*r1
1857         vpmuludq        $H1,$T1,$H2             # h1*r1
1858         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1859         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1860         vpmuludq        $H3,$T1,$T4             # h3*r1
1861         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1862         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1863         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1864
1865         vpmuludq        $H0,$T0,$T4             # h0*r0
1866         vpmuludq        $H1,$T0,$H2             # h1*r0
1867         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1868          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1869         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1870         vpmuludq        $H3,$T0,$T4             # h3*r0
1871         vpmuludq        $H4,$T0,$H2             # h4*r0
1872         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1873         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1874
1875         vpmuludq        $H3,$T1,$T4             # h3*s2
1876         vpmuludq        $H4,$T1,$H2             # h4*s2
1877         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1878         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1879          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1880         vpmuludq        $H1,$T2,$T4             # h1*r2
1881         vpmuludq        $H0,$T2,$T2             # h0*r2
1882         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1883         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1884
1885         vpmuludq        $H1,$H2,$T4             # h1*r3
1886         vpmuludq        $H0,$H2,$H2             # h0*r3
1887         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1888         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1889         vpmuludq        $H3,$T3,$T4             # h3*s3
1890         vpmuludq        $H4,$T3,$H2             # h4*s3
1891         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1892         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1893
1894         vpmuludq        $H3,$S4,$H3             # h3*s4
1895         vpmuludq        $H4,$S4,$H4             # h4*s4
1896         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*r4
1897         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*r4
1898         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
1899         vpmuludq        $H1,$S4,$H0             # h1*s4
1900         vmovdqa         64(%rcx),$MASK          # .Lmask26
1901         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1902         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1903
1904         ################################################################
1905         # horizontal addition
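        # the four lanes now hold independent partial sums: fold the two
        # 64-bit halves of each 128-bit lane together, then fold the upper
        # 128-bit half onto the lower, leaving the totals in the low qwords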
1906
1907         vpsrldq         \$8,$D1,$T1
1908         vpsrldq         \$8,$H2,$T2
1909         vpsrldq         \$8,$H3,$T3
1910         vpsrldq         \$8,$H4,$T4
1911         vpsrldq         \$8,$H0,$T0
1912         vpaddq          $T1,$D1,$D1
1913         vpaddq          $T2,$H2,$H2
1914         vpaddq          $T3,$H3,$H3
1915         vpaddq          $T4,$H4,$H4
1916         vpaddq          $T0,$H0,$H0
1917
1918         vpermq          \$0x2,$H3,$T3
1919         vpermq          \$0x2,$H4,$T4
1920         vpermq          \$0x2,$H0,$T0
1921         vpermq          \$0x2,$D1,$T1
1922         vpermq          \$0x2,$H2,$T2
1923         vpaddq          $T3,$H3,$H3
1924         vpaddq          $T4,$H4,$H4
1925         vpaddq          $T0,$H0,$H0
1926         vpaddq          $T1,$D1,$D1
1927         vpaddq          $T2,$H2,$H2
1928
1929         ################################################################
1930         # lazy reduction
1931
1932         vpsrlq          \$26,$H3,$D3
1933         vpand           $MASK,$H3,$H3
1934         vpaddq          $D3,$H4,$H4             # h3 -> h4
1935
1936         vpsrlq          \$26,$H0,$D0
1937         vpand           $MASK,$H0,$H0
1938         vpaddq          $D0,$D1,$H1             # h0 -> h1
1939
1940         vpsrlq          \$26,$H4,$D4
1941         vpand           $MASK,$H4,$H4
1942
1943         vpsrlq          \$26,$H1,$D1
1944         vpand           $MASK,$H1,$H1
1945         vpaddq          $D1,$H2,$H2             # h1 -> h2
1946
1947         vpaddq          $D4,$H0,$H0
1948         vpsllq          \$2,$D4,$D4
1949         vpaddq          $D4,$H0,$H0             # h4 -> h0
1950
1951         vpsrlq          \$26,$H2,$D2
1952         vpand           $MASK,$H2,$H2
1953         vpaddq          $D2,$H3,$H3             # h2 -> h3
1954
1955         vpsrlq          \$26,$H0,$D0
1956         vpand           $MASK,$H0,$H0
1957         vpaddq          $D0,$H1,$H1             # h0 -> h1
1958
1959         vpsrlq          \$26,$H3,$D3
1960         vpand           $MASK,$H3,$H3
1961         vpaddq          $D3,$H4,$H4             # h3 -> h4
1962
1963         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
1964         vmovd           %x#$H1,`4*1-48-64`($ctx)
1965         vmovd           %x#$H2,`4*2-48-64`($ctx)
1966         vmovd           %x#$H3,`4*3-48-64`($ctx)
1967         vmovd           %x#$H4,`4*4-48-64`($ctx)
1968 ___
1969 $code.=<<___    if ($win64);
1970         vmovdqa         0x50(%r11),%xmm6
1971         vmovdqa         0x60(%r11),%xmm7
1972         vmovdqa         0x70(%r11),%xmm8
1973         vmovdqa         0x80(%r11),%xmm9
1974         vmovdqa         0x90(%r11),%xmm10
1975         vmovdqa         0xa0(%r11),%xmm11
1976         vmovdqa         0xb0(%r11),%xmm12
1977         vmovdqa         0xc0(%r11),%xmm13
1978         vmovdqa         0xd0(%r11),%xmm14
1979         vmovdqa         0xe0(%r11),%xmm15
1980         lea             0xf8(%r11),%rsp
1981 .Ldo_avx2_epilogue:
1982 ___
1983 $code.=<<___    if (!$win64);
1984         lea             8(%r11),%rsp
1985 ___
1986 $code.=<<___;
1987         vzeroupper
1988         ret
1989 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
1990 ___
1991 }
1992 $code.=<<___;
1993 .align  64
1994 .Lconst:
1995 .Lmask24:
1996 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
1997 .L129:
1998 .long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
1999 .Lmask26:
2000 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
2001 .Lfive:
2002 .long   5,0,5,0,5,0,5,0
2003 ___
2004 }
2005
2006 $code.=<<___;
2007 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2008 .align  16
2009 ___
2010
2011 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2012 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
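#
# The handlers use the pair of HandlerData[] labels stored in .xdata to
# decide where RIP fell: before the prologue or past the epilogue nothing
# needs undoing, otherwise the general-purpose registers (and, in
# avx_handler, xmm6-xmm15) saved by the function are copied back into the
# CONTEXT before RtlVirtualUnwind continues the unwind.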
2013 if ($win64) {
2014 $rec="%rcx";
2015 $frame="%rdx";
2016 $context="%r8";
2017 $disp="%r9";
2018
2019 $code.=<<___;
2020 .extern __imp_RtlVirtualUnwind
2021 .type   se_handler,\@abi-omnipotent
2022 .align  16
2023 se_handler:
2024         push    %rsi
2025         push    %rdi
2026         push    %rbx
2027         push    %rbp
2028         push    %r12
2029         push    %r13
2030         push    %r14
2031         push    %r15
2032         pushfq
2033         sub     \$64,%rsp
2034
2035         mov     120($context),%rax      # pull context->Rax
2036         mov     248($context),%rbx      # pull context->Rip
2037
2038         mov     8($disp),%rsi           # disp->ImageBase
2039         mov     56($disp),%r11          # disp->HandlerData
2040
2041         mov     0(%r11),%r10d           # HandlerData[0]
2042         lea     (%rsi,%r10),%r10        # prologue label
2043         cmp     %r10,%rbx               # context->Rip<.Lprologue
2044         jb      .Lcommon_seh_tail
2045
2046         mov     152($context),%rax      # pull context->Rsp
2047
2048         mov     4(%r11),%r10d           # HandlerData[1]
2049         lea     (%rsi,%r10),%r10        # epilogue label
2050         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
2051         jae     .Lcommon_seh_tail
2052
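        # the prologue pushed six GPRs (48 bytes); recover their values
        # from the stack and put them back into the CONTEXT record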
2053         lea     48(%rax),%rax
2054
2055         mov     -8(%rax),%rbx
2056         mov     -16(%rax),%rbp
2057         mov     -24(%rax),%r12
2058         mov     -32(%rax),%r13
2059         mov     -40(%rax),%r14
2060         mov     -48(%rax),%r15
2061         mov     %rbx,144($context)      # restore context->Rbx
2062         mov     %rbp,160($context)      # restore context->Rbp
2063         mov     %r12,216($context)      # restore context->R12
2064         mov     %r13,224($context)      # restore context->R13
2065         mov     %r14,232($context)      # restore context->R14
2066         mov     %r15,240($context)      # restore context->R15
2067
2068         jmp     .Lcommon_seh_tail
2069 .size   se_handler,.-se_handler
2070
2071 .type   avx_handler,\@abi-omnipotent
2072 .align  16
2073 avx_handler:
2074         push    %rsi
2075         push    %rdi
2076         push    %rbx
2077         push    %rbp
2078         push    %r12
2079         push    %r13
2080         push    %r14
2081         push    %r15
2082         pushfq
2083         sub     \$64,%rsp
2084
2085         mov     120($context),%rax      # pull context->Rax
2086         mov     248($context),%rbx      # pull context->Rip
2087
2088         mov     8($disp),%rsi           # disp->ImageBase
2089         mov     56($disp),%r11          # disp->HandlerData
2090
2091         mov     0(%r11),%r10d           # HandlerData[0]
2092         lea     (%rsi,%r10),%r10        # prologue label
2093         cmp     %r10,%rbx               # context->Rip<prologue label
2094         jb      .Lcommon_seh_tail
2095
2096         mov     152($context),%rax      # pull context->Rsp
2097
2098         mov     4(%r11),%r10d           # HandlerData[1]
2099         lea     (%rsi,%r10),%r10        # epilogue label
2100         cmp     %r10,%rbx               # context->Rip>=epilogue label
2101         jae     .Lcommon_seh_tail
2102
2103         mov     208($context),%rax      # pull context->R11
2104
2105         lea     0x50(%rax),%rsi
2106         lea     0xf8(%rax),%rax
2107         lea     512($context),%rdi      # &context.Xmm6
2108         mov     \$20,%ecx
2109         .long   0xa548f3fc              # cld; rep movsq
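        # copy the ten saved registers xmm6-xmm15 (20 quad-words at
        # 0x50(%r11)) back into context->Xmm6..Xmm15; 0xf8(%r11) is the
        # stack pointer the function started with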
2110
2111 .Lcommon_seh_tail:
2112         mov     8(%rax),%rdi
2113         mov     16(%rax),%rsi
2114         mov     %rax,152($context)      # restore context->Rsp
2115         mov     %rsi,168($context)      # restore context->Rsi
2116         mov     %rdi,176($context)      # restore context->Rdi
2117
2118         mov     40($disp),%rdi          # disp->ContextRecord
2119         mov     $context,%rsi           # context
2120         mov     \$154,%ecx              # sizeof(CONTEXT)
2121         .long   0xa548f3fc              # cld; rep movsq
2122
2123         mov     $disp,%rsi
2124         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
2125         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
2126         mov     0(%rsi),%r8             # arg3, disp->ControlPc
2127         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
2128         mov     40(%rsi),%r10           # disp->ContextRecord
2129         lea     56(%rsi),%r11           # &disp->HandlerData
2130         lea     24(%rsi),%r12           # &disp->EstablisherFrame
2131         mov     %r10,32(%rsp)           # arg5
2132         mov     %r11,40(%rsp)           # arg6
2133         mov     %r12,48(%rsp)           # arg7
2134         mov     %rcx,56(%rsp)           # arg8, (NULL)
2135         call    *__imp_RtlVirtualUnwind(%rip)
2136
2137         mov     \$1,%eax                # ExceptionContinueSearch
2138         add     \$64,%rsp
2139         popfq
2140         pop     %r15
2141         pop     %r14
2142         pop     %r13
2143         pop     %r12
2144         pop     %rbp
2145         pop     %rbx
2146         pop     %rdi
2147         pop     %rsi
2148         ret
2149 .size   avx_handler,.-avx_handler
2150
2151 .section        .pdata
2152 .align  4
2153         .rva    .LSEH_begin_poly1305_init
2154         .rva    .LSEH_end_poly1305_init
2155         .rva    .LSEH_info_poly1305_init
2156
2157         .rva    .LSEH_begin_poly1305_blocks
2158         .rva    .LSEH_end_poly1305_blocks
2159         .rva    .LSEH_info_poly1305_blocks
2160
2161         .rva    .LSEH_begin_poly1305_emit
2162         .rva    .LSEH_end_poly1305_emit
2163         .rva    .LSEH_info_poly1305_emit
2164 ___
2165 $code.=<<___ if ($avx);
2166         .rva    .LSEH_begin_poly1305_blocks_avx
2167         .rva    .Lbase2_64_avx
2168         .rva    .LSEH_info_poly1305_blocks_avx_1
2169
2170         .rva    .Lbase2_64_avx
2171         .rva    .Leven_avx
2172         .rva    .LSEH_info_poly1305_blocks_avx_2
2173
2174         .rva    .Leven_avx
2175         .rva    .LSEH_end_poly1305_blocks_avx
2176         .rva    .LSEH_info_poly1305_blocks_avx_3
2177
2178         .rva    .LSEH_begin_poly1305_emit_avx
2179         .rva    .LSEH_end_poly1305_emit_avx
2180         .rva    .LSEH_info_poly1305_emit_avx
2181 ___
2182 $code.=<<___ if ($avx>1);
2183         .rva    .LSEH_begin_poly1305_blocks_avx2
2184         .rva    .Lbase2_64_avx2
2185         .rva    .LSEH_info_poly1305_blocks_avx2_1
2186
2187         .rva    .Lbase2_64_avx2
2188         .rva    .Leven_avx2
2189         .rva    .LSEH_info_poly1305_blocks_avx2_2
2190
2191         .rva    .Leven_avx2
2192         .rva    .LSEH_end_poly1305_blocks_avx2
2193         .rva    .LSEH_info_poly1305_blocks_avx2_3
2194 ___
2195 $code.=<<___;
2196 .section        .xdata
2197 .align  8
2198 .LSEH_info_poly1305_init:
2199         .byte   9,0,0,0
2200         .rva    se_handler
2201         .rva    .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
2202
2203 .LSEH_info_poly1305_blocks:
2204         .byte   9,0,0,0
2205         .rva    se_handler
2206         .rva    .Lblocks_body,.Lblocks_epilogue
2207
2208 .LSEH_info_poly1305_emit:
2209         .byte   9,0,0,0
2210         .rva    se_handler
2211         .rva    .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
2212 ___
2213 $code.=<<___ if ($avx);
2214 .LSEH_info_poly1305_blocks_avx_1:
2215         .byte   9,0,0,0
2216         .rva    se_handler
2217         .rva    .Lblocks_avx_body,.Lblocks_avx_epilogue         # HandlerData[]
2218
2219 .LSEH_info_poly1305_blocks_avx_2:
2220         .byte   9,0,0,0
2221         .rva    se_handler
2222         .rva    .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue     # HandlerData[]
2223
2224 .LSEH_info_poly1305_blocks_avx_3:
2225         .byte   9,0,0,0
2226         .rva    avx_handler
2227         .rva    .Ldo_avx_body,.Ldo_avx_epilogue                 # HandlerData[]
2228
2229 .LSEH_info_poly1305_emit_avx:
2230         .byte   9,0,0,0
2231         .rva    se_handler
2232         .rva    .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
2233 ___
2234 $code.=<<___ if ($avx>1);
2235 .LSEH_info_poly1305_blocks_avx2_1:
2236         .byte   9,0,0,0
2237         .rva    se_handler
2238         .rva    .Lblocks_avx2_body,.Lblocks_avx2_epilogue       # HandlerData[]
2239
2240 .LSEH_info_poly1305_blocks_avx2_2:
2241         .byte   9,0,0,0
2242         .rva    se_handler
2243         .rva    .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue   # HandlerData[]
2244
2245 .LSEH_info_poly1305_blocks_avx2_3:
2246         .byte   9,0,0,0
2247         .rva    avx_handler
2248         .rva    .Ldo_avx2_body,.Ldo_avx2_epilogue               # HandlerData[]
2249 ___
2250 }
2251
2252 foreach (split('\n',$code)) {
2253         s/\`([^\`]*)\`/eval($1)/ge;     # evaluate backquoted constant expressions
2254         s/%r([a-z]+)#d/%e$1/g;          # %rax#d -> %eax, etc.
2255         s/%r([0-9]+)#d/%r$1d/g;         # %r8#d -> %r8d, etc.
2256         s/%x#%y/%x/g;                   # %x#%ymmN -> %xmmN
2257
2258         print $_,"\n";
2259 }
2260 close STDOUT;