1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # This module implements Poly1305 hash for x86_64.
11 #
12 # March 2015
13 #
14 # Numbers are cycles per processed byte with poly1305_blocks alone,
15 # measured with rdtsc at fixed clock frequency.
16 #
17 #               IALU/gcc-4.8(*) AVX(**)         AVX2
18 # P4            4.90/+120%      -
19 # Core 2        2.39/+90%       -
20 # Westmere      1.86/+120%      -
21 # Sandy Bridge  1.39/+140%      1.10
22 # Haswell       1.10/+175%      1.11            0.65
23 # Skylake       1.12/+120%      0.96            0.51
24 # Silvermont    2.83/+95%       -
25 # VIA Nano      1.82/+150%      -
26 # Sledgehammer  1.38/+160%      -
27 # Bulldozer     2.21/+130%      0.97
28 #
29 # (*)   improvement coefficients relative to clang are more modest and
30 #       are ~50% on most processors; in both cases we are comparing to
31 #       __int128 code;
32 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
33 #       it was faster than integer-only code only on older Intel P4 and
34 #       Core processors, by 30-50%, with smaller gains on newer processors,
35 #       while being slower on contemporary ones, e.g. almost 2x slower on
36 #       Atom; as the former are naturally disappearing, SSE2 is deemed unnecessary;
37
38 $flavour = shift;
39 $output  = shift;
40 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
41
42 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
43
44 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
45 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
46 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
47 die "can't locate x86_64-xlate.pl";
48
49 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
50                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
51         $avx = ($1>=2.19) + ($1>=2.22);
52 }
53
54 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
55            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
56         $avx = ($1>=2.09) + ($1>=2.10);
57 }
58
59 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
60            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
61         $avx = ($1>=10) + ($1>=12);
62 }
63
64 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
65         $avx = ($2>=3.0) + ($2>3.0);
66 }
67
68 open OUT,"| \"$^X\" $xlate $flavour $output";
69 *STDOUT=*OUT;
70
71 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
72 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
73 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
74 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
75
76 sub poly1305_iteration {
77 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
78 # output:       $h0-$h2 *= $r0-$r1
79 $code.=<<___;
80         mulq    $h0                     # h0*r1
81         mov     %rax,$d2
82          mov    $r0,%rax
83         mov     %rdx,$d3
84
85         mulq    $h0                     # h0*r0
86         mov     %rax,$h0                # future $h0
87          mov    $r0,%rax
88         mov     %rdx,$d1
89
90         mulq    $h1                     # h1*r0
91         add     %rax,$d2
92          mov    $s1,%rax
93         adc     %rdx,$d3
94
95         mulq    $h1                     # h1*s1
96          mov    $h2,$h1                 # borrow $h1
97         add     %rax,$h0
98         adc     %rdx,$d1
99
100         imulq   $s1,$h1                 # h2*s1
101         add     $h1,$d2
102          mov    $d1,$h1
103         adc     \$0,$d3
104
105         imulq   $r0,$h2                 # h2*r0
106         add     $d2,$h1
107         mov     \$-4,%rax               # mask value
108         adc     $h2,$d3
109
110         and     $d3,%rax                # last reduction step
111         mov     $d3,$h2
112         shr     \$2,$d3
113         and     \$3,$h2
114         add     $d3,%rax
115         add     %rax,$h0
116         adc     \$0,$h1
117 ___
118 }
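# A hedged big-integer reference of the step above (assumption: illustrative
# only, the generator never calls it). The assembly performs a multi-limb
# schoolbook multiplication followed by a partial reduction; the s1 value used
# above is r1 + (r1>>2) = 5*r1/4, exact because key clamping zeroes the low
# bits of r1, and it folds the 2^130 == 5 (mod 2^130-5) wraparound into the
# multiplication itself.
sub poly1305_iteration_ref {
	my ($h,$r0,$r1) = @_;		# Math::BigInt h; r0, r1 as decimal strings
					# to avoid 64-bit float truncation
	require Math::BigInt;
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	my $r = Math::BigInt->new($r1)->blsft(64)->badd($r0);
	return $h->copy()->bmul($r)->bmod($p);	# same value mod p; the assembly
						# keeps it only partially reduced
}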
119
120 ########################################################################
121 # Layout of opaque area is as follows.
122 #
123 #       unsigned __int64 h[3];          # current hash value base 2^64
124 #       unsigned __int64 r[2];          # key value base 2^64
125
126 $code.=<<___;
127 .text
128
129 .extern OPENSSL_ia32cap_P
130
131 .globl  poly1305_init
132 .globl  poly1305_blocks
133 .globl  poly1305_emit
134 .type   poly1305_init,\@function,3
135 .align  32
136 poly1305_init:
137         xor     %rax,%rax
138         mov     %rax,0($ctx)            # initialize hash value
139         mov     %rax,8($ctx)
140         mov     %rax,16($ctx)
141
142         cmp     \$0,$inp
143         je      .Lno_key
144
145         lea     poly1305_blocks(%rip),%r10
146         lea     poly1305_emit(%rip),%r11
147 ___
148 $code.=<<___    if ($avx);
149         mov     OPENSSL_ia32cap_P+4(%rip),%r9
150         lea     poly1305_blocks_avx(%rip),%rax
151         lea     poly1305_emit_avx(%rip),%rcx
152         bt      \$`60-32`,%r9           # AVX?
153         cmovc   %rax,%r10
154         cmovc   %rcx,%r11
155 ___
156 $code.=<<___    if ($avx>1);
157         lea     poly1305_blocks_avx2(%rip),%rax
158         bt      \$`5+32`,%r9            # AVX2?
159         cmovc   %rax,%r10
160 ___
161 $code.=<<___;
162         mov     \$0x0ffffffc0fffffff,%rax
163         mov     \$0x0ffffffc0ffffffc,%rcx
164         and     0($inp),%rax
165         and     8($inp),%rcx
166         mov     %rax,24($ctx)
167         mov     %rcx,32($ctx)
168 ___
169 $code.=<<___    if ($flavour !~ /elf32/);
170         mov     %r10,0(%rdx)
171         mov     %r11,8(%rdx)
172 ___
173 $code.=<<___    if ($flavour =~ /elf32/);
174         mov     %r10d,0(%rdx)
175         mov     %r11d,4(%rdx)
176 ___
177 $code.=<<___;
178         mov     \$1,%eax
179 .Lno_key:
180         ret
181 .size   poly1305_init,.-poly1305_init
182
183 .type   poly1305_blocks,\@function,4
184 .align  32
185 poly1305_blocks:
186 .Lblocks:
187         sub     \$16,$len               # too short?
188         jc      .Lno_data
189
190         push    %rbx
191         push    %rbp
192         push    %r12
193         push    %r13
194         push    %r14
195         push    %r15
196 .Lblocks_body:
197
198         mov     $len,%r15               # reassign $len
199
200         mov     24($ctx),$r0            # load r
201         mov     32($ctx),$s1
202
203         mov     0($ctx),$h0             # load hash value
204         mov     8($ctx),$h1
205         mov     16($ctx),$h2
206
207         mov     $s1,$r1
208         shr     \$2,$s1
209         mov     $r1,%rax
210         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
211         jmp     .Loop
212
213 .align  32
214 .Loop:
215         add     0($inp),$h0             # accumulate input
216         adc     8($inp),$h1
217         lea     16($inp),$inp
218         adc     $padbit,$h2
219 ___
220         &poly1305_iteration();
221 $code.=<<___;
222         mov     $r1,%rax
223         sub     \$16,%r15               # len-=16
224         jnc     .Loop
225
226         mov     $h0,0($ctx)             # store hash value
227         mov     $h1,8($ctx)
228         mov     $h2,16($ctx)
229
230         mov     0(%rsp),%r15
231         mov     8(%rsp),%r14
232         mov     16(%rsp),%r13
233         mov     24(%rsp),%r12
234         mov     32(%rsp),%rbp
235         mov     40(%rsp),%rbx
236         lea     48(%rsp),%rsp
237 .Lno_data:
238 .Lblocks_epilogue:
239         ret
240 .size   poly1305_blocks,.-poly1305_blocks
241
242 .type   poly1305_emit,\@function,3
243 .align  32
244 poly1305_emit:
245 .Lemit:
246         mov     0($ctx),%r8     # load hash value
247         mov     8($ctx),%r9
248         mov     16($ctx),%r10
249
250         mov     %r8,%rax
251         add     \$5,%r8         # compare to modulus
252         mov     %r9,%rcx
253         adc     \$0,%r9
254         adc     \$0,%r10
255         shr     \$2,%r10        # did 130-bit value overflow?
256         cmovnz  %r8,%rax
257         cmovnz  %r9,%rcx
258
259         add     0($nonce),%rax  # accumulate nonce
260         adc     8($nonce),%rcx
261         mov     %rax,0($mac)    # write result
262         mov     %rcx,8($mac)
263
264         ret
265 .size   poly1305_emit,.-poly1305_emit
266 ___
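# A hedged reference sketch of the emit step above (assumption: illustrative
# only, never called by the generator): reduce the accumulated hash to its
# canonical representative mod p = 2^130-5 (the code adds 5 and tests bit 130;
# when set, the low 128 bits of h+5 equal h-p), then add the nonce mod 2^128.
sub poly1305_emit_ref {
	my ($h,$nonce) = @_;		# Math::BigInt hash and nonce
	require Math::BigInt;
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	$h = $h - $p if $h >= $p;	# final conditional subtraction
	return ($h + $nonce) % Math::BigInt->new(2)->bpow(128);
}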
267 if ($avx) {
268
269 ########################################################################
270 # Layout of opaque area is as follows.
271 #
272 #       unsigned __int32 h[5];          # current hash value base 2^26
273 #       unsigned __int32 is_base2_26;
274 #       unsigned __int64 r[2];          # key value base 2^64
275 #       unsigned __int64 pad;
276 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
277 #
278 # where r^n are the base 2^26 digits of the powers of the multiplier key.
279 # There are 5 digits, but the last four are interleaved with their multiples
280 # of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
281
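# A hedged helper sketch (assumption: documentation only, nothing below calls
# it) showing how a value is split into the five base 2^26 digits stored in
# the h[5] and r^n entries described above:
sub base2_26_limbs_ref {
	my ($v) = @_;			# Math::BigInt value, < 2^130
	require Math::BigInt;
	$v = Math::BigInt->new($v);
	my @limbs;
	for (0..4) {
		push @limbs, $v->copy()->band(0x3ffffff)->numify();
		$v->brsft(26);
	}
	return @limbs;			# limbs[i] holds bits 26*i..26*i+25
}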
282 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
283     map("%xmm$_",(0..15));
284
285 $code.=<<___;
286 .type   __poly1305_block,\@abi-omnipotent
287 .align  32
288 __poly1305_block:
289 ___
290         &poly1305_iteration();
291 $code.=<<___;
292         ret
293 .size   __poly1305_block,.-__poly1305_block
294
295 .type   __poly1305_init_avx,\@abi-omnipotent
296 .align  32
297 __poly1305_init_avx:
298         mov     $r0,$h0
299         mov     $r1,$h1
300         xor     $h2,$h2
301
302         lea     48+64($ctx),$ctx        # size optimization
303
304         mov     $r1,%rax
305         call    __poly1305_block        # r^2
306
307         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
308         mov     \$0x3ffffff,%edx
309         mov     $h0,$d1
310         and     $h0#d,%eax
311         mov     $r0,$d2
312         and     $r0#d,%edx
313         mov     %eax,`16*0+0-64`($ctx)
314         shr     \$26,$d1
315         mov     %edx,`16*0+4-64`($ctx)
316         shr     \$26,$d2
317
318         mov     \$0x3ffffff,%eax
319         mov     \$0x3ffffff,%edx
320         and     $d1#d,%eax
321         and     $d2#d,%edx
322         mov     %eax,`16*1+0-64`($ctx)
323         lea     (%rax,%rax,4),%eax      # *5
324         mov     %edx,`16*1+4-64`($ctx)
325         lea     (%rdx,%rdx,4),%edx      # *5
326         mov     %eax,`16*2+0-64`($ctx)
327         shr     \$26,$d1
328         mov     %edx,`16*2+4-64`($ctx)
329         shr     \$26,$d2
330
331         mov     $h1,%rax
332         mov     $r1,%rdx
333         shl     \$12,%rax
334         shl     \$12,%rdx
335         or      $d1,%rax
336         or      $d2,%rdx
337         and     \$0x3ffffff,%eax
338         and     \$0x3ffffff,%edx
339         mov     %eax,`16*3+0-64`($ctx)
340         lea     (%rax,%rax,4),%eax      # *5
341         mov     %edx,`16*3+4-64`($ctx)
342         lea     (%rdx,%rdx,4),%edx      # *5
343         mov     %eax,`16*4+0-64`($ctx)
344         mov     $h1,$d1
345         mov     %edx,`16*4+4-64`($ctx)
346         mov     $r1,$d2
347
348         mov     \$0x3ffffff,%eax
349         mov     \$0x3ffffff,%edx
350         shr     \$14,$d1
351         shr     \$14,$d2
352         and     $d1#d,%eax
353         and     $d2#d,%edx
354         mov     %eax,`16*5+0-64`($ctx)
355         lea     (%rax,%rax,4),%eax      # *5
356         mov     %edx,`16*5+4-64`($ctx)
357         lea     (%rdx,%rdx,4),%edx      # *5
358         mov     %eax,`16*6+0-64`($ctx)
359         shr     \$26,$d1
360         mov     %edx,`16*6+4-64`($ctx)
361         shr     \$26,$d2
362
363         mov     $h2,%rax
364         shl     \$24,%rax
365         or      %rax,$d1
366         mov     $d1#d,`16*7+0-64`($ctx)
367         lea     ($d1,$d1,4),$d1         # *5
368         mov     $d2#d,`16*7+4-64`($ctx)
369         lea     ($d2,$d2,4),$d2         # *5
370         mov     $d1#d,`16*8+0-64`($ctx)
371         mov     $d2#d,`16*8+4-64`($ctx)
372
373         mov     $r1,%rax
374         call    __poly1305_block        # r^3
375
376         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
377         mov     $h0,$d1
378         and     $h0#d,%eax
379         shr     \$26,$d1
380         mov     %eax,`16*0+12-64`($ctx)
381
382         mov     \$0x3ffffff,%edx
383         and     $d1#d,%edx
384         mov     %edx,`16*1+12-64`($ctx)
385         lea     (%rdx,%rdx,4),%edx      # *5
386         shr     \$26,$d1
387         mov     %edx,`16*2+12-64`($ctx)
388
389         mov     $h1,%rax
390         shl     \$12,%rax
391         or      $d1,%rax
392         and     \$0x3ffffff,%eax
393         mov     %eax,`16*3+12-64`($ctx)
394         lea     (%rax,%rax,4),%eax      # *5
395         mov     $h1,$d1
396         mov     %eax,`16*4+12-64`($ctx)
397
398         mov     \$0x3ffffff,%edx
399         shr     \$14,$d1
400         and     $d1#d,%edx
401         mov     %edx,`16*5+12-64`($ctx)
402         lea     (%rdx,%rdx,4),%edx      # *5
403         shr     \$26,$d1
404         mov     %edx,`16*6+12-64`($ctx)
405
406         mov     $h2,%rax
407         shl     \$24,%rax
408         or      %rax,$d1
409         mov     $d1#d,`16*7+12-64`($ctx)
410         lea     ($d1,$d1,4),$d1         # *5
411         mov     $d1#d,`16*8+12-64`($ctx)
412
413         mov     $r1,%rax
414         call    __poly1305_block        # r^4
415
416         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
417         mov     $h0,$d1
418         and     $h0#d,%eax
419         shr     \$26,$d1
420         mov     %eax,`16*0+8-64`($ctx)
421
422         mov     \$0x3ffffff,%edx
423         and     $d1#d,%edx
424         mov     %edx,`16*1+8-64`($ctx)
425         lea     (%rdx,%rdx,4),%edx      # *5
426         shr     \$26,$d1
427         mov     %edx,`16*2+8-64`($ctx)
428
429         mov     $h1,%rax
430         shl     \$12,%rax
431         or      $d1,%rax
432         and     \$0x3ffffff,%eax
433         mov     %eax,`16*3+8-64`($ctx)
434         lea     (%rax,%rax,4),%eax      # *5
435         mov     $h1,$d1
436         mov     %eax,`16*4+8-64`($ctx)
437
438         mov     \$0x3ffffff,%edx
439         shr     \$14,$d1
440         and     $d1#d,%edx
441         mov     %edx,`16*5+8-64`($ctx)
442         lea     (%rdx,%rdx,4),%edx      # *5
443         shr     \$26,$d1
444         mov     %edx,`16*6+8-64`($ctx)
445
446         mov     $h2,%rax
447         shl     \$24,%rax
448         or      %rax,$d1
449         mov     $d1#d,`16*7+8-64`($ctx)
450         lea     ($d1,$d1,4),$d1         # *5
451         mov     $d1#d,`16*8+8-64`($ctx)
452
453         lea     -48-64($ctx),$ctx       # size [de-]optimization
454         ret
455 .size   __poly1305_init_avx,.-__poly1305_init_avx
456
457 .type   poly1305_blocks_avx,\@function,4
458 .align  32
459 poly1305_blocks_avx:
460         mov     20($ctx),%r8d           # is_base2_26
461         cmp     \$128,$len
462         jae     .Lblocks_avx
463         test    %r8d,%r8d
464         jz      .Lblocks
465
466 .Lblocks_avx:
467         and     \$-16,$len
468         jz      .Lno_data_avx
469
470         vzeroupper
471
472         test    %r8d,%r8d
473         jz      .Lbase2_64_avx
474
475         test    \$31,$len
476         jz      .Leven_avx
477
478         push    %rbx
479         push    %rbp
480         push    %r12
481         push    %r13
482         push    %r14
483         push    %r15
484 .Lblocks_avx_body:
485
486         mov     $len,%r15               # reassign $len
487
488         mov     0($ctx),$d1             # load hash value
489         mov     8($ctx),$d2
490         mov     16($ctx),$h2#d
491
492         mov     24($ctx),$r0            # load r
493         mov     32($ctx),$s1
494
495         ################################# base 2^26 -> base 2^64
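        # (added note: the packing below reassembles
        #    h = h[0] + h[1]*2^26 + h[2]*2^52 + h[3]*2^78 + h[4]*2^104
        #  into the 64-bit limbs $h0:$h1:$h2)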
496         mov     $d1#d,$h0#d
497         and     \$-1<<31,$d1
498         mov     $d2,$r1                 # borrow $r1
499         mov     $d2#d,$h1#d
500         and     \$-1<<31,$d2
501
502         shr     \$6,$d1
503         shl     \$52,$r1
504         add     $d1,$h0
505         shr     \$12,$h1
506         shr     \$18,$d2
507         add     $r1,$h0
508         adc     $d2,$h1
509
510         mov     $h2,$d1
511         shl     \$40,$d1
512         shr     \$24,$h2
513         add     $d1,$h1
514         adc     \$0,$h2                 # can be partially reduced...
515
516         mov     \$-4,$d2                # ... so reduce
517         mov     $h2,$d1
518         and     $h2,$d2
519         shr     \$2,$d1
520         and     \$3,$h2
521         add     $d2,$d1                 # =*5
522         add     $d1,$h0
523         adc     \$0,$h1
524
525         mov     $s1,$r1
526         mov     $s1,%rax
527         shr     \$2,$s1
528         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
529
530         add     0($inp),$h0             # accumulate input
531         adc     8($inp),$h1
532         lea     16($inp),$inp
533         adc     $padbit,$h2
534
535         call    __poly1305_block
536
537         test    $padbit,$padbit         # if $padbit is zero,
538         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
539
540         ################################# base 2^64 -> base 2^26
541         mov     $h0,%rax
542         mov     $h0,%rdx
543         shr     \$52,$h0
544         mov     $h1,$r0
545         mov     $h1,$r1
546         shr     \$26,%rdx
547         and     \$0x3ffffff,%rax        # h[0]
548         shl     \$12,$r0
549         and     \$0x3ffffff,%rdx        # h[1]
550         shr     \$14,$h1
551         or      $r0,$h0
552         shl     \$24,$h2
553         and     \$0x3ffffff,$h0         # h[2]
554         shr     \$40,$r1
555         and     \$0x3ffffff,$h1         # h[3]
556         or      $r1,$h2                 # h[4]
557
558         sub     \$16,%r15
559         jz      .Lstore_base2_26_avx
560
561         vmovd   %rax#d,$H0
562         vmovd   %rdx#d,$H1
563         vmovd   $h0#d,$H2
564         vmovd   $h1#d,$H3
565         vmovd   $h2#d,$H4
566         jmp     .Lproceed_avx
567
568 .align  32
569 .Lstore_base2_64_avx:
570         mov     $h0,0($ctx)
571         mov     $h1,8($ctx)
572         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
573         jmp     .Ldone_avx
574
575 .align  16
576 .Lstore_base2_26_avx:
577         mov     %rax#d,0($ctx)          # store hash value base 2^26
578         mov     %rdx#d,4($ctx)
579         mov     $h0#d,8($ctx)
580         mov     $h1#d,12($ctx)
581         mov     $h2#d,16($ctx)
582 .align  16
583 .Ldone_avx:
584         mov     0(%rsp),%r15
585         mov     8(%rsp),%r14
586         mov     16(%rsp),%r13
587         mov     24(%rsp),%r12
588         mov     32(%rsp),%rbp
589         mov     40(%rsp),%rbx
590         lea     48(%rsp),%rsp
591 .Lno_data_avx:
592 .Lblocks_avx_epilogue:
593         ret
594
595 .align  32
596 .Lbase2_64_avx:
597         push    %rbx
598         push    %rbp
599         push    %r12
600         push    %r13
601         push    %r14
602         push    %r15
603 .Lbase2_64_avx_body:
604
605         mov     $len,%r15               # reassign $len
606
607         mov     24($ctx),$r0            # load r
608         mov     32($ctx),$s1
609
610         mov     0($ctx),$h0             # load hash value
611         mov     8($ctx),$h1
612         mov     16($ctx),$h2#d
613
614         mov     $s1,$r1
615         mov     $s1,%rax
616         shr     \$2,$s1
617         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
618
619         test    \$31,$len
620         jz      .Linit_avx
621
622         add     0($inp),$h0             # accumulate input
623         adc     8($inp),$h1
624         lea     16($inp),$inp
625         adc     $padbit,$h2
626         sub     \$16,%r15
627
628         call    __poly1305_block
629
630 .Linit_avx:
631         ################################# base 2^64 -> base 2^26
632         mov     $h0,%rax
633         mov     $h0,%rdx
634         shr     \$52,$h0
635         mov     $h1,$d1
636         mov     $h1,$d2
637         shr     \$26,%rdx
638         and     \$0x3ffffff,%rax        # h[0]
639         shl     \$12,$d1
640         and     \$0x3ffffff,%rdx        # h[1]
641         shr     \$14,$h1
642         or      $d1,$h0
643         shl     \$24,$h2
644         and     \$0x3ffffff,$h0         # h[2]
645         shr     \$40,$d2
646         and     \$0x3ffffff,$h1         # h[3]
647         or      $d2,$h2                 # h[4]
648
649         vmovd   %rax#d,$H0
650         vmovd   %rdx#d,$H1
651         vmovd   $h0#d,$H2
652         vmovd   $h1#d,$H3
653         vmovd   $h2#d,$H4
654         movl    \$1,20($ctx)            # set is_base2_26
655
656         call    __poly1305_init_avx
657
658 .Lproceed_avx:
659         mov     %r15,$len
660
661         mov     0(%rsp),%r15
662         mov     8(%rsp),%r14
663         mov     16(%rsp),%r13
664         mov     24(%rsp),%r12
665         mov     32(%rsp),%rbp
666         mov     40(%rsp),%rbx
667         lea     48(%rsp),%rax
668         lea     48(%rsp),%rsp
669 .Lbase2_64_avx_epilogue:
670         jmp     .Ldo_avx
671
672 .align  32
673 .Leven_avx:
674         vmovd           4*0($ctx),$H0           # load hash value
675         vmovd           4*1($ctx),$H1
676         vmovd           4*2($ctx),$H2
677         vmovd           4*3($ctx),$H3
678         vmovd           4*4($ctx),$H4
679
680 .Ldo_avx:
681 ___
682 $code.=<<___    if (!$win64);
683         lea             -0x58(%rsp),%r11
684         sub             \$0x178,%rsp
685 ___
686 $code.=<<___    if ($win64);
687         lea             -0xf8(%rsp),%r11
688         sub             \$0x218,%rsp
689         vmovdqa         %xmm6,0x50(%r11)
690         vmovdqa         %xmm7,0x60(%r11)
691         vmovdqa         %xmm8,0x70(%r11)
692         vmovdqa         %xmm9,0x80(%r11)
693         vmovdqa         %xmm10,0x90(%r11)
694         vmovdqa         %xmm11,0xa0(%r11)
695         vmovdqa         %xmm12,0xb0(%r11)
696         vmovdqa         %xmm13,0xc0(%r11)
697         vmovdqa         %xmm14,0xd0(%r11)
698         vmovdqa         %xmm15,0xe0(%r11)
699 .Ldo_avx_body:
700 ___
701 $code.=<<___;
702         sub             \$64,$len
703         lea             -32($inp),%rax
704         cmovc           %rax,$inp
705
706         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
707         lea             `16*3+64`($ctx),$ctx    # size optimization
708         lea             .Lconst(%rip),%rcx
709
710         ################################################################
711         # load input
712         vmovdqu         16*2($inp),$T0
713         vmovdqu         16*3($inp),$T1
714         vmovdqa         64(%rcx),$MASK          # .Lmask26
715
716         vpsrldq         \$6,$T0,$T2             # splat input
717         vpsrldq         \$6,$T1,$T3
718         vpunpckhqdq     $T1,$T0,$T4             # 4
719         vpunpcklqdq     $T1,$T0,$T0             # 0:1
720         vpunpcklqdq     $T3,$T2,$T3             # 2:3
721
722         vpsrlq          \$40,$T4,$T4            # 4
723         vpsrlq          \$26,$T0,$T1
724         vpand           $MASK,$T0,$T0           # 0
725         vpsrlq          \$4,$T3,$T2
726         vpand           $MASK,$T1,$T1           # 1
727         vpsrlq          \$30,$T3,$T3
728         vpand           $MASK,$T2,$T2           # 2
729         vpand           $MASK,$T3,$T3           # 3
730         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
731
732         jbe             .Lskip_loop_avx
733
734         # expand and copy pre-calculated table to stack
735         vmovdqu         `16*1-64`($ctx),$D1
736         vmovdqu         `16*2-64`($ctx),$D2
737         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
738         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
739         vmovdqa         $D3,-0x90(%r11)
740         vmovdqa         $D0,0x00(%rsp)
741         vpshufd         \$0xEE,$D1,$D4
742         vmovdqu         `16*3-64`($ctx),$D0
743         vpshufd         \$0x44,$D1,$D1
744         vmovdqa         $D4,-0x80(%r11)
745         vmovdqa         $D1,0x10(%rsp)
746         vpshufd         \$0xEE,$D2,$D3
747         vmovdqu         `16*4-64`($ctx),$D1
748         vpshufd         \$0x44,$D2,$D2
749         vmovdqa         $D3,-0x70(%r11)
750         vmovdqa         $D2,0x20(%rsp)
751         vpshufd         \$0xEE,$D0,$D4
752         vmovdqu         `16*5-64`($ctx),$D2
753         vpshufd         \$0x44,$D0,$D0
754         vmovdqa         $D4,-0x60(%r11)
755         vmovdqa         $D0,0x30(%rsp)
756         vpshufd         \$0xEE,$D1,$D3
757         vmovdqu         `16*6-64`($ctx),$D0
758         vpshufd         \$0x44,$D1,$D1
759         vmovdqa         $D3,-0x50(%r11)
760         vmovdqa         $D1,0x40(%rsp)
761         vpshufd         \$0xEE,$D2,$D4
762         vmovdqu         `16*7-64`($ctx),$D1
763         vpshufd         \$0x44,$D2,$D2
764         vmovdqa         $D4,-0x40(%r11)
765         vmovdqa         $D2,0x50(%rsp)
766         vpshufd         \$0xEE,$D0,$D3
767         vmovdqu         `16*8-64`($ctx),$D2
768         vpshufd         \$0x44,$D0,$D0
769         vmovdqa         $D3,-0x30(%r11)
770         vmovdqa         $D0,0x60(%rsp)
771         vpshufd         \$0xEE,$D1,$D4
772         vpshufd         \$0x44,$D1,$D1
773         vmovdqa         $D4,-0x20(%r11)
774         vmovdqa         $D1,0x70(%rsp)
775         vpshufd         \$0xEE,$D2,$D3
776          vmovdqa        0x00(%rsp),$D4          # preload r0^2
777         vpshufd         \$0x44,$D2,$D2
778         vmovdqa         $D3,-0x10(%r11)
779         vmovdqa         $D2,0x80(%rsp)
780
781         jmp             .Loop_avx
782
783 .align  32
784 .Loop_avx:
785         ################################################################
786         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
787         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
788         #   \___________________/
789         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
790         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
791         #   \___________________/ \____________________/
792         #
793         # Note that we start with inp[2:3]*r^2. This is because it
794         # doesn't depend on the reduction in the previous iteration.
795         ################################################################
796         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
797         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
798         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
799         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
800         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
801         #
802         # though note that $Tx and $Hx are "reversed" in this section,
803         # and $D4 is preloaded with r0^2...
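        # (added note: the 5*r multiples, i.e. the s values referenced in the
        #  comments, exist because 2^130 == 5 mod 2^130-5: digit products that
        #  land at or above bit 130 wrap around as 5x their value, and storing
        #  them precomputed keeps each term a single vpmuludq)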
804
805         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
806         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
807           vmovdqa       $H2,0x20(%r11)                          # offload hash
808         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
809          vmovdqa        0x10(%rsp),$H2          # r1^2
810         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
811         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
812
813           vmovdqa       $H0,0x00(%r11)                          #
814         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
815           vmovdqa       $H1,0x10(%r11)                          #
816         vpmuludq        $T3,$H2,$H1             # h3*r1
817         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
818         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
819           vmovdqa       $H3,0x30(%r11)                          #
820         vpmuludq        $T2,$H2,$H0             # h2*r1
821         vpmuludq        $T1,$H2,$H1             # h1*r1
822         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
823          vmovdqa        0x30(%rsp),$H3          # r2^2
824         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
825           vmovdqa       $H4,0x40(%r11)                          #
826         vpmuludq        $T0,$H2,$H2             # h0*r1
827          vpmuludq       $T2,$H3,$H0             # h2*r2
828         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
829
830          vmovdqa        0x40(%rsp),$H4          # s2^2
831         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
832         vpmuludq        $T1,$H3,$H1             # h1*r2
833         vpmuludq        $T0,$H3,$H3             # h0*r2
834         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
835          vmovdqa        0x50(%rsp),$H2          # r3^2
836         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
837         vpmuludq        $T4,$H4,$H0             # h4*s2
838         vpmuludq        $T3,$H4,$H4             # h3*s2
839         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
840          vmovdqa        0x60(%rsp),$H3          # s3^2
841         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
842
843          vmovdqa        0x80(%rsp),$H4          # s4^2
844         vpmuludq        $T1,$H2,$H1             # h1*r3
845         vpmuludq        $T0,$H2,$H2             # h0*r3
846         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
847         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
848         vpmuludq        $T4,$H3,$H0             # h4*s3
849         vpmuludq        $T3,$H3,$H1             # h3*s3
850         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
851          vmovdqu        16*0($inp),$H0                          # load input
852         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
853         vpmuludq        $T2,$H3,$H3             # h2*s3
854          vpmuludq       $T2,$H4,$T2             # h2*s4
855         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
856
857          vmovdqu        16*1($inp),$H1                          #
858         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
859         vpmuludq        $T3,$H4,$T3             # h3*s4
860         vpmuludq        $T4,$H4,$T4             # h4*s4
861          vpsrldq        \$6,$H0,$H2                             # splat input
862         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
863         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
864          vpsrldq        \$6,$H1,$H3                             #
865         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
866         vpmuludq        $T1,$H4,$T0             # h1*s4
867          vpunpckhqdq    $H1,$H0,$H4             # 4
868         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
869          vmovdqa        -0x90(%r11),$T4         # r0^4
870         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
871
872         vpunpcklqdq     $H1,$H0,$H0             # 0:1
873         vpunpcklqdq     $H3,$H2,$H3             # 2:3
874
875         #vpsrlq         \$40,$H4,$H4            # 4
876         vpsrldq         \$`40/8`,$H4,$H4        # 4
877         vpsrlq          \$26,$H0,$H1
878         vpand           $MASK,$H0,$H0           # 0
879         vpsrlq          \$4,$H3,$H2
880         vpand           $MASK,$H1,$H1           # 1
881         vpand           0(%rcx),$H4,$H4         # .Lmask24
882         vpsrlq          \$30,$H3,$H3
883         vpand           $MASK,$H2,$H2           # 2
884         vpand           $MASK,$H3,$H3           # 3
885         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
886
887         vpaddq          0x00(%r11),$H0,$H0      # add hash value
888         vpaddq          0x10(%r11),$H1,$H1
889         vpaddq          0x20(%r11),$H2,$H2
890         vpaddq          0x30(%r11),$H3,$H3
891         vpaddq          0x40(%r11),$H4,$H4
892
893         lea             16*2($inp),%rax
894         lea             16*4($inp),$inp
895         sub             \$64,$len
896         cmovc           %rax,$inp
897
898         ################################################################
899         # Now we accumulate (inp[0:1]+hash)*r^4
900         ################################################################
901         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
902         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
903         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
904         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
905         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
906
907         vpmuludq        $H0,$T4,$T0             # h0*r0
908         vpmuludq        $H1,$T4,$T1             # h1*r0
909         vpaddq          $T0,$D0,$D0
910         vpaddq          $T1,$D1,$D1
911          vmovdqa        -0x80(%r11),$T2         # r1^4
912         vpmuludq        $H2,$T4,$T0             # h2*r0
913         vpmuludq        $H3,$T4,$T1             # h3*r0
914         vpaddq          $T0,$D2,$D2
915         vpaddq          $T1,$D3,$D3
916         vpmuludq        $H4,$T4,$T4             # h4*r0
917          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
918         vpaddq          $T4,$D4,$D4
919
920         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
921         vpmuludq        $H2,$T2,$T1             # h2*r1
922         vpmuludq        $H3,$T2,$T0             # h3*r1
923         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
924          vmovdqa        -0x60(%r11),$T3         # r2^4
925         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
926         vpmuludq        $H1,$T2,$T1             # h1*r1
927         vpmuludq        $H0,$T2,$T2             # h0*r1
928         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
929         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
930
931          vmovdqa        -0x50(%r11),$T4         # s2^4
932         vpmuludq        $H2,$T3,$T0             # h2*r2
933         vpmuludq        $H1,$T3,$T1             # h1*r2
934         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
935         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
936          vmovdqa        -0x40(%r11),$T2         # r3^4
937         vpmuludq        $H0,$T3,$T3             # h0*r2
938         vpmuludq        $H4,$T4,$T0             # h4*s2
939         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
940         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
941          vmovdqa        -0x30(%r11),$T3         # s3^4
942         vpmuludq        $H3,$T4,$T4             # h3*s2
943          vpmuludq       $H1,$T2,$T1             # h1*r3
944         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
945
946          vmovdqa        -0x10(%r11),$T4         # s4^4
947         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
948         vpmuludq        $H0,$T2,$T2             # h0*r3
949         vpmuludq        $H4,$T3,$T0             # h4*s3
950         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
951         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
952          vmovdqu        16*2($inp),$T0                          # load input
953         vpmuludq        $H3,$T3,$T2             # h3*s3
954         vpmuludq        $H2,$T3,$T3             # h2*s3
955         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
956          vmovdqu        16*3($inp),$T1                          #
957         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
958
959         vpmuludq        $H2,$T4,$H2             # h2*s4
960         vpmuludq        $H3,$T4,$H3             # h3*s4
961          vpsrldq        \$6,$T0,$T2                             # splat input
962         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
963         vpmuludq        $H4,$T4,$H4             # h4*s4
964          vpsrldq        \$6,$T1,$T3                             #
965         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
966         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
967         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
968         vpmuludq        $H1,$T4,$H0
969          vpunpckhqdq    $T1,$T0,$T4             # 4
970         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
971         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
972
973         vpunpcklqdq     $T1,$T0,$T0             # 0:1
974         vpunpcklqdq     $T3,$T2,$T3             # 2:3
975
976         #vpsrlq         \$40,$T4,$T4            # 4
977         vpsrldq         \$`40/8`,$T4,$T4        # 4
978         vpsrlq          \$26,$T0,$T1
979          vmovdqa        0x00(%rsp),$D4          # preload r0^2
980         vpand           $MASK,$T0,$T0           # 0
981         vpsrlq          \$4,$T3,$T2
982         vpand           $MASK,$T1,$T1           # 1
983         vpand           0(%rcx),$T4,$T4         # .Lmask24
984         vpsrlq          \$30,$T3,$T3
985         vpand           $MASK,$T2,$T2           # 2
986         vpand           $MASK,$T3,$T3           # 3
987         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
988
989         ################################################################
990         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
991         # and P. Schwabe
992
993         vpsrlq          \$26,$H3,$D3
994         vpand           $MASK,$H3,$H3
995         vpaddq          $D3,$H4,$H4             # h3 -> h4
996
997         vpsrlq          \$26,$H0,$D0
998         vpand           $MASK,$H0,$H0
999         vpaddq          $D0,$D1,$H1             # h0 -> h1
1000
1001         vpsrlq          \$26,$H4,$D0
1002         vpand           $MASK,$H4,$H4
1003
1004         vpsrlq          \$26,$H1,$D1
1005         vpand           $MASK,$H1,$H1
1006         vpaddq          $D1,$H2,$H2             # h1 -> h2
1007
1008         vpaddq          $D0,$H0,$H0
1009         vpsllq          \$2,$D0,$D0
1010         vpaddq          $D0,$H0,$H0             # h4 -> h0
1011
1012         vpsrlq          \$26,$H2,$D2
1013         vpand           $MASK,$H2,$H2
1014         vpaddq          $D2,$H3,$H3             # h2 -> h3
1015
1016         vpsrlq          \$26,$H0,$D0
1017         vpand           $MASK,$H0,$H0
1018         vpaddq          $D0,$H1,$H1             # h0 -> h1
1019
1020         vpsrlq          \$26,$H3,$D3
1021         vpand           $MASK,$H3,$H3
1022         vpaddq          $D3,$H4,$H4             # h3 -> h4
1023
1024         ja              .Loop_avx
1025
1026 .Lskip_loop_avx:
1027         ################################################################
1028         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1029
1030         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1031         add             \$32,$len
1032         jnz             .Long_tail_avx
1033
1034         vpaddq          $H2,$T2,$T2
1035         vpaddq          $H0,$T0,$T0
1036         vpaddq          $H1,$T1,$T1
1037         vpaddq          $H3,$T3,$T3
1038         vpaddq          $H4,$T4,$T4
1039
1040 .Long_tail_avx:
1041         vmovdqa         $H2,0x20(%r11)
1042         vmovdqa         $H0,0x00(%r11)
1043         vmovdqa         $H1,0x10(%r11)
1044         vmovdqa         $H3,0x30(%r11)
1045         vmovdqa         $H4,0x40(%r11)
1046
1047         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1048         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1049         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1050         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1051         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1052
1053         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1054         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1055          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1056         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1057         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1058         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1059
1060         vpmuludq        $T3,$H2,$H0             # h3*r1
1061         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1062          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1063         vpmuludq        $T2,$H2,$H1             # h2*r1
1064         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1065          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1066         vpmuludq        $T1,$H2,$H0             # h1*r1
1067         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1068         vpmuludq        $T0,$H2,$H2             # h0*r1
1069         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1070         vpmuludq        $T4,$H3,$H3             # h4*s1
1071         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1072
1073          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1074         vpmuludq        $T2,$H4,$H1             # h2*r2
1075         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1076         vpmuludq        $T1,$H4,$H0             # h1*r2
1077         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1078          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1079         vpmuludq        $T0,$H4,$H4             # h0*r2
1080         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1081         vpmuludq        $T4,$H2,$H1             # h4*s2
1082         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1083          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1084         vpmuludq        $T3,$H2,$H2             # h3*s2
1085         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1086
1087         vpmuludq        $T1,$H3,$H0             # h1*r3
1088         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1089         vpmuludq        $T0,$H3,$H3             # h0*r3
1090         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1091          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1092         vpmuludq        $T4,$H4,$H1             # h4*s3
1093         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1094          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1095         vpmuludq        $T3,$H4,$H0             # h3*s3
1096         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1097         vpmuludq        $T2,$H4,$H4             # h2*s3
1098         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1099
1100         vpmuludq        $T0,$H2,$H2             # h0*r4
1101         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1102         vpmuludq        $T4,$H3,$H1             # h4*s4
1103         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1104         vpmuludq        $T3,$H3,$H0             # h3*s4
1105         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1106         vpmuludq        $T2,$H3,$H1             # h2*s4
1107         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1108         vpmuludq        $T1,$H3,$H3             # h1*s4
1109         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1110
1111         jz              .Lshort_tail_avx
1112
1113         vmovdqu         16*0($inp),$H0          # load input
1114         vmovdqu         16*1($inp),$H1
1115
1116         vpsrldq         \$6,$H0,$H2             # splat input
1117         vpsrldq         \$6,$H1,$H3
1118         vpunpckhqdq     $H1,$H0,$H4             # 4
1119         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1120         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1121
1122         vpsrlq          \$40,$H4,$H4            # 4
1123         vpsrlq          \$26,$H0,$H1
1124         vpand           $MASK,$H0,$H0           # 0
1125         vpsrlq          \$4,$H3,$H2
1126         vpand           $MASK,$H1,$H1           # 1
1127         vpsrlq          \$30,$H3,$H3
1128         vpand           $MASK,$H2,$H2           # 2
1129         vpand           $MASK,$H3,$H3           # 3
1130         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1131
1132         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1133         vpaddq          0x00(%r11),$H0,$H0
1134         vpaddq          0x10(%r11),$H1,$H1
1135         vpaddq          0x20(%r11),$H2,$H2
1136         vpaddq          0x30(%r11),$H3,$H3
1137         vpaddq          0x40(%r11),$H4,$H4
1138
1139         ################################################################
1140         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1141
1142         vpmuludq        $H0,$T4,$T0             # h0*r0
1143         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1144         vpmuludq        $H1,$T4,$T1             # h1*r0
1145         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1146         vpmuludq        $H2,$T4,$T0             # h2*r0
1147         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1148          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1149         vpmuludq        $H3,$T4,$T1             # h3*r0
1150         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1151         vpmuludq        $H4,$T4,$T4             # h4*r0
1152         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1153
1154         vpmuludq        $H3,$T2,$T0             # h3*r1
1155         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1156          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1157         vpmuludq        $H2,$T2,$T1             # h2*r1
1158         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1159          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1160         vpmuludq        $H1,$T2,$T0             # h1*r1
1161         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1162         vpmuludq        $H0,$T2,$T2             # h0*r1
1163         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1164         vpmuludq        $H4,$T3,$T3             # h4*s1
1165         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1166
1167          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1168         vpmuludq        $H2,$T4,$T1             # h2*r2
1169         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1170         vpmuludq        $H1,$T4,$T0             # h1*r2
1171         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1172          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1173         vpmuludq        $H0,$T4,$T4             # h0*r2
1174         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1175         vpmuludq        $H4,$T2,$T1             # h4*s2
1176         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1177          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1178         vpmuludq        $H3,$T2,$T2             # h3*s2
1179         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1180
1181         vpmuludq        $H1,$T3,$T0             # h1*r3
1182         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1183         vpmuludq        $H0,$T3,$T3             # h0*r3
1184         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1185          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1186         vpmuludq        $H4,$T4,$T1             # h4*s3
1187         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1188          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1189         vpmuludq        $H3,$T4,$T0             # h3*s3
1190         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1191         vpmuludq        $H2,$T4,$T4             # h2*s3
1192         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1193
1194         vpmuludq        $H0,$T2,$T2             # h0*r4
1195         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1196         vpmuludq        $H4,$T3,$T1             # h4*s4
1197         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1198         vpmuludq        $H3,$T3,$T0             # h3*s4
1199         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1200         vpmuludq        $H2,$T3,$T1             # h2*s4
1201         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1202         vpmuludq        $H1,$T3,$T3             # h1*s4
1203         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1204
1205 .Lshort_tail_avx:
1206         ################################################################
1207         # horizontal addition
1208
1209         vpsrldq         \$8,$D4,$T4
1210         vpsrldq         \$8,$D3,$T3
1211         vpsrldq         \$8,$D1,$T1
1212         vpsrldq         \$8,$D0,$T0
1213         vpsrldq         \$8,$D2,$T2
1214         vpaddq          $T3,$D3,$D3
1215         vpaddq          $T4,$D4,$D4
1216         vpaddq          $T0,$D0,$D0
1217         vpaddq          $T1,$D1,$D1
1218         vpaddq          $T2,$D2,$D2
1219
1220         ################################################################
1221         # lazy reduction
1222
1223         vpsrlq          \$26,$D3,$H3
1224         vpand           $MASK,$D3,$D3
1225         vpaddq          $H3,$D4,$D4             # h3 -> h4
1226
1227         vpsrlq          \$26,$D0,$H0
1228         vpand           $MASK,$D0,$D0
1229         vpaddq          $H0,$D1,$D1             # h0 -> h1
1230
1231         vpsrlq          \$26,$D4,$H4
1232         vpand           $MASK,$D4,$D4
1233
1234         vpsrlq          \$26,$D1,$H1
1235         vpand           $MASK,$D1,$D1
1236         vpaddq          $H1,$D2,$D2             # h1 -> h2
1237
1238         vpaddq          $H4,$D0,$D0
1239         vpsllq          \$2,$H4,$H4
1240         vpaddq          $H4,$D0,$D0             # h4 -> h0
1241
1242         vpsrlq          \$26,$D2,$H2
1243         vpand           $MASK,$D2,$D2
1244         vpaddq          $H2,$D3,$D3             # h2 -> h3
1245
1246         vpsrlq          \$26,$D0,$H0
1247         vpand           $MASK,$D0,$D0
1248         vpaddq          $H0,$D1,$D1             # h0 -> h1
1249
1250         vpsrlq          \$26,$D3,$H3
1251         vpand           $MASK,$D3,$D3
1252         vpaddq          $H3,$D4,$D4             # h3 -> h4
1253
1254         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1255         vmovd           $D1,`4*1-48-64`($ctx)
1256         vmovd           $D2,`4*2-48-64`($ctx)
1257         vmovd           $D3,`4*3-48-64`($ctx)
1258         vmovd           $D4,`4*4-48-64`($ctx)
1259 ___
1260 $code.=<<___    if ($win64);
1261         vmovdqa         0x50(%r11),%xmm6
1262         vmovdqa         0x60(%r11),%xmm7
1263         vmovdqa         0x70(%r11),%xmm8
1264         vmovdqa         0x80(%r11),%xmm9
1265         vmovdqa         0x90(%r11),%xmm10
1266         vmovdqa         0xa0(%r11),%xmm11
1267         vmovdqa         0xb0(%r11),%xmm12
1268         vmovdqa         0xc0(%r11),%xmm13
1269         vmovdqa         0xd0(%r11),%xmm14
1270         vmovdqa         0xe0(%r11),%xmm15
1271         lea             0xf8(%r11),%rsp
1272 .Ldo_avx_epilogue:
1273 ___
1274 $code.=<<___    if (!$win64);
1275         lea             0x58(%r11),%rsp
1276 ___
1277 $code.=<<___;
1278         vzeroupper
1279         ret
1280 .size   poly1305_blocks_avx,.-poly1305_blocks_avx
1281
1282 .type   poly1305_emit_avx,\@function,3
1283 .align  32
1284 poly1305_emit_avx:
1285         cmpl    \$0,20($ctx)    # is_base2_26?
1286         je      .Lemit
1287
1288         mov     0($ctx),%eax    # load hash value base 2^26
1289         mov     4($ctx),%ecx
1290         mov     8($ctx),%r8d
1291         mov     12($ctx),%r11d
1292         mov     16($ctx),%r10d
1293
1294         shl     \$26,%rcx       # base 2^26 -> base 2^64
1295         mov     %r8,%r9
1296         shl     \$52,%r8
1297         add     %rcx,%rax
1298         shr     \$12,%r9
1299         add     %rax,%r8        # h0
1300         adc     \$0,%r9
1301
1302         shl     \$14,%r11
1303         mov     %r10,%rax
1304         shr     \$24,%r10
1305         add     %r11,%r9
1306         shl     \$40,%rax
1307         add     %rax,%r9        # h1
1308         adc     \$0,%r10        # h2
1309
1310         mov     %r10,%rax       # could be partially reduced, so reduce
1311         mov     %r10,%rcx
1312         and     \$3,%r10
1313         shr     \$2,%rax
1314         and     \$-4,%rcx
1315         add     %rcx,%rax
1316         add     %rax,%r8
1317         adc     \$0,%r9
1318
1319         mov     %r8,%rax
1320         add     \$5,%r8         # compare to modulus
1321         mov     %r9,%rcx
1322         adc     \$0,%r9
1323         adc     \$0,%r10
1324         shr     \$2,%r10        # did 130-bit value overflow?
1325         cmovnz  %r8,%rax
1326         cmovnz  %r9,%rcx
1327
1328         add     0($nonce),%rax  # accumulate nonce
1329         adc     8($nonce),%rcx
1330         mov     %rax,0($mac)    # write result
1331         mov     %rcx,8($mac)
1332
1333         ret
1334 .size   poly1305_emit_avx,.-poly1305_emit_avx
1335 ___
1336
1337 if ($avx>1) {
1338 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1339     map("%ymm$_",(0..15));
1340 my $S4=$MASK;
1341
1342 $code.=<<___;
1343 .type   poly1305_blocks_avx2,\@function,4
1344 .align  32
1345 poly1305_blocks_avx2:
1346         mov     20($ctx),%r8d           # is_base2_26
1347         cmp     \$128,$len
1348         jae     .Lblocks_avx2
1349         test    %r8d,%r8d
1350         jz      .Lblocks
1351
1352 .Lblocks_avx2:
1353         and     \$-16,$len
1354         jz      .Lno_data_avx2
1355
1356         vzeroupper
1357
1358         test    %r8d,%r8d
1359         jz      .Lbase2_64_avx2
1360
1361         test    \$63,$len
1362         jz      .Leven_avx2
1363
1364         push    %rbx
1365         push    %rbp
1366         push    %r12
1367         push    %r13
1368         push    %r14
1369         push    %r15
1370 .Lblocks_avx2_body:
1371
1372         mov     $len,%r15               # reassign $len
1373
1374         mov     0($ctx),$d1             # load hash value
1375         mov     8($ctx),$d2
1376         mov     16($ctx),$h2#d
1377
1378         mov     24($ctx),$r0            # load r
1379         mov     32($ctx),$s1
1380
1381         ################################# base 2^26 -> base 2^64
1382         mov     $d1#d,$h0#d
1383         and     \$-1<<31,$d1
1384         mov     $d2,$r1                 # borrow $r1
1385         mov     $d2#d,$h1#d
1386         and     \$-1<<31,$d2
1387
1388         shr     \$6,$d1
1389         shl     \$52,$r1
1390         add     $d1,$h0
1391         shr     \$12,$h1
1392         shr     \$18,$d2
1393         add     $r1,$h0
1394         adc     $d2,$h1
1395
1396         mov     $h2,$d1
1397         shl     \$40,$d1
1398         shr     \$24,$h2
1399         add     $d1,$h1
1400         adc     \$0,$h2                 # can be partially reduced...
1401
1402         mov     \$-4,$d2                # ... so reduce
1403         mov     $h2,$d1
1404         and     $h2,$d2
1405         shr     \$2,$d1
1406         and     \$3,$h2
1407         add     $d2,$d1                 # =*5
1408         add     $d1,$h0
1409         adc     \$0,$h1
1410
1411         mov     $s1,$r1
1412         mov     $s1,%rax
1413         shr     \$2,$s1
1414         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1415
1416 .Lbase2_26_pre_avx2:
1417         add     0($inp),$h0             # accumulate input
1418         adc     8($inp),$h1
1419         lea     16($inp),$inp
1420         adc     $padbit,$h2
1421         sub     \$16,%r15
1422
1423         call    __poly1305_block
1424         mov     $r1,%rax
1425
1426         test    \$63,%r15
1427         jnz     .Lbase2_26_pre_avx2
1428
1429         test    $padbit,$padbit         # if $padbit is zero,
1430         jz      .Lstore_base2_64_avx2   # store hash in base 2^64 format
1431
1432         ################################# base 2^64 -> base 2^26
1433         mov     $h0,%rax
1434         mov     $h0,%rdx
1435         shr     \$52,$h0
1436         mov     $h1,$r0
1437         mov     $h1,$r1
1438         shr     \$26,%rdx
1439         and     \$0x3ffffff,%rax        # h[0]
1440         shl     \$12,$r0
1441         and     \$0x3ffffff,%rdx        # h[1]
1442         shr     \$14,$h1
1443         or      $r0,$h0
1444         shl     \$24,$h2
1445         and     \$0x3ffffff,$h0         # h[2]
1446         shr     \$40,$r1
1447         and     \$0x3ffffff,$h1         # h[3]
1448         or      $r1,$h2                 # h[4]
1449
1450         test    %r15,%r15
1451         jz      .Lstore_base2_26_avx2
1452
1453         vmovd   %rax#d,%x#$H0
1454         vmovd   %rdx#d,%x#$H1
1455         vmovd   $h0#d,%x#$H2
1456         vmovd   $h1#d,%x#$H3
1457         vmovd   $h2#d,%x#$H4
1458         jmp     .Lproceed_avx2
1459
1460 .align  32
1461 .Lstore_base2_64_avx2:
1462         mov     $h0,0($ctx)
1463         mov     $h1,8($ctx)
1464         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1465         jmp     .Ldone_avx2
1466
1467 .align  16
1468 .Lstore_base2_26_avx2:
1469         mov     %rax#d,0($ctx)          # store hash value base 2^26
1470         mov     %rdx#d,4($ctx)
1471         mov     $h0#d,8($ctx)
1472         mov     $h1#d,12($ctx)
1473         mov     $h2#d,16($ctx)
1474 .align  16
1475 .Ldone_avx2:
1476         mov     0(%rsp),%r15
1477         mov     8(%rsp),%r14
1478         mov     16(%rsp),%r13
1479         mov     24(%rsp),%r12
1480         mov     32(%rsp),%rbp
1481         mov     40(%rsp),%rbx
1482         lea     48(%rsp),%rsp
1483 .Lno_data_avx2:
1484 .Lblocks_avx2_epilogue:
1485         ret
1486
1487 .align  32
1488 .Lbase2_64_avx2:
1489         push    %rbx
1490         push    %rbp
1491         push    %r12
1492         push    %r13
1493         push    %r14
1494         push    %r15
1495 .Lbase2_64_avx2_body:
1496
1497         mov     $len,%r15               # reassign $len
1498
1499         mov     24($ctx),$r0            # load r
1500         mov     32($ctx),$s1
1501
1502         mov     0($ctx),$h0             # load hash value
1503         mov     8($ctx),$h1
1504         mov     16($ctx),$h2#d
1505
1506         mov     $s1,$r1
1507         mov     $s1,%rax
1508         shr     \$2,$s1
1509         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1510
1511         test    \$63,$len
1512         jz      .Linit_avx2
1513
1514 .Lbase2_64_pre_avx2:
1515         add     0($inp),$h0             # accumulate input
1516         adc     8($inp),$h1
1517         lea     16($inp),$inp
1518         adc     $padbit,$h2
1519         sub     \$16,%r15
1520
1521         call    __poly1305_block
1522         mov     $r1,%rax
1523
1524         test    \$63,%r15
1525         jnz     .Lbase2_64_pre_avx2
1526
1527 .Linit_avx2:
1528         ################################# base 2^64 -> base 2^26
1529         mov     $h0,%rax
1530         mov     $h0,%rdx
1531         shr     \$52,$h0
1532         mov     $h1,$d1
1533         mov     $h1,$d2
1534         shr     \$26,%rdx
1535         and     \$0x3ffffff,%rax        # h[0]
1536         shl     \$12,$d1
1537         and     \$0x3ffffff,%rdx        # h[1]
1538         shr     \$14,$h1
1539         or      $d1,$h0
1540         shl     \$24,$h2
1541         and     \$0x3ffffff,$h0         # h[2]
1542         shr     \$40,$d2
1543         and     \$0x3ffffff,$h1         # h[3]
1544         or      $d2,$h2                 # h[4]
1545
1546         vmovd   %rax#d,%x#$H0
1547         vmovd   %rdx#d,%x#$H1
1548         vmovd   $h0#d,%x#$H2
1549         vmovd   $h1#d,%x#$H3
1550         vmovd   $h2#d,%x#$H4
1551         movl    \$1,20($ctx)            # set is_base2_26
1552
1553         call    __poly1305_init_avx
1554
1555 .Lproceed_avx2:
1556         mov     %r15,$len
1557
1558         mov     0(%rsp),%r15
1559         mov     8(%rsp),%r14
1560         mov     16(%rsp),%r13
1561         mov     24(%rsp),%r12
1562         mov     32(%rsp),%rbp
1563         mov     40(%rsp),%rbx
1564         lea     48(%rsp),%rax
1565         lea     48(%rsp),%rsp
1566 .Lbase2_64_avx2_epilogue:
1567         jmp     .Ldo_avx2
1568
1569 .align  32
1570 .Leven_avx2:
1571         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1572         vmovd           4*1($ctx),%x#$H1
1573         vmovd           4*2($ctx),%x#$H2
1574         vmovd           4*3($ctx),%x#$H3
1575         vmovd           4*4($ctx),%x#$H4
1576
1577 .Ldo_avx2:
1578 ___
1579 $code.=<<___    if (!$win64);
1580         lea             -8(%rsp),%r11
1581         sub             \$0x128,%rsp
1582 ___
1583 $code.=<<___    if ($win64);
1584         lea             -0xf8(%rsp),%r11
1585         sub             \$0x1c8,%rsp
1586         vmovdqa         %xmm6,0x50(%r11)
1587         vmovdqa         %xmm7,0x60(%r11)
1588         vmovdqa         %xmm8,0x70(%r11)
1589         vmovdqa         %xmm9,0x80(%r11)
1590         vmovdqa         %xmm10,0x90(%r11)
1591         vmovdqa         %xmm11,0xa0(%r11)
1592         vmovdqa         %xmm12,0xb0(%r11)
1593         vmovdqa         %xmm13,0xc0(%r11)
1594         vmovdqa         %xmm14,0xd0(%r11)
1595         vmovdqa         %xmm15,0xe0(%r11)
1596 .Ldo_avx2_body:
1597 ___
1598 $code.=<<___;
1599         lea             48+64($ctx),$ctx        # size optimization
1600         lea             .Lconst(%rip),%rcx
1601
1602         # expand and copy pre-calculated table to stack
1603         vmovdqu         `16*0-64`($ctx),%x#$T2
1604         and             \$-512,%rsp
1605         vmovdqu         `16*1-64`($ctx),%x#$T3
1606         vmovdqu         `16*2-64`($ctx),%x#$T4
1607         vmovdqu         `16*3-64`($ctx),%x#$D0
1608         vmovdqu         `16*4-64`($ctx),%x#$D1
1609         vmovdqu         `16*5-64`($ctx),%x#$D2
1610         vmovdqu         `16*6-64`($ctx),%x#$D3
1611         vpermq          \$0x15,$T2,$T2          # 00003412 -> 12343434
1612         vmovdqu         `16*7-64`($ctx),%x#$D4
1613         vpermq          \$0x15,$T3,$T3
1614         vpshufd         \$0xc8,$T2,$T2          # 12343434 -> 14243444
1615         vmovdqu         `16*8-64`($ctx),%x#$MASK
1616         vpermq          \$0x15,$T4,$T4
1617         vpshufd         \$0xc8,$T3,$T3
1618         vmovdqa         $T2,0x00(%rsp)
1619         vpermq          \$0x15,$D0,$D0
1620         vpshufd         \$0xc8,$T4,$T4
1621         vmovdqa         $T3,0x20(%rsp)
1622         vpermq          \$0x15,$D1,$D1
1623         vpshufd         \$0xc8,$D0,$D0
1624         vmovdqa         $T4,0x40(%rsp)
1625         vpermq          \$0x15,$D2,$D2
1626         vpshufd         \$0xc8,$D1,$D1
1627         vmovdqa         $D0,0x60(%rsp)
1628         vpermq          \$0x15,$D3,$D3
1629         vpshufd         \$0xc8,$D2,$D2
1630         vmovdqa         $D1,0x80(%rsp)
1631         vpermq          \$0x15,$D4,$D4
1632         vpshufd         \$0xc8,$D3,$D3
1633         vmovdqa         $D2,0xa0(%rsp)
1634         vpermq          \$0x15,$MASK,$MASK
1635         vpshufd         \$0xc8,$D4,$D4
1636         vmovdqa         $D3,0xc0(%rsp)
1637         vpshufd         \$0xc8,$MASK,$MASK
1638         vmovdqa         $D4,0xe0(%rsp)
1639         vmovdqa         $MASK,0x100(%rsp)
1640         vmovdqa         64(%rcx),$MASK          # .Lmask26
1641
1642         ################################################################
1643         # load input
1644         vmovdqu         16*0($inp),%x#$T0
1645         vmovdqu         16*1($inp),%x#$T1
1646         vinserti128     \$1,16*2($inp),$T0,$T0
1647         vinserti128     \$1,16*3($inp),$T1,$T1
1648         lea             16*4($inp),$inp
1649
1650         vpsrldq         \$6,$T0,$T2             # splat input
1651         vpsrldq         \$6,$T1,$T3
1652         vpunpckhqdq     $T1,$T0,$T4             # 4
1653         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1654         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1655
1656         vpsrlq          \$30,$T2,$T3
1657         vpsrlq          \$4,$T2,$T2
1658         vpsrlq          \$26,$T0,$T1
1659         vpsrlq          \$40,$T4,$T4            # 4
1660         vpand           $MASK,$T2,$T2           # 2
1661         vpand           $MASK,$T0,$T0           # 0
1662         vpand           $MASK,$T1,$T1           # 1
1663         vpand           $MASK,$T3,$T3           # 3
1664         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
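             # For each 16-byte block m, the splat above extracts the usual
             # 26-bit limbs (a sketch):
             #       t0 = m & 2^26-1,  t1 = (m>>26) & 2^26-1,  t2 = (m>>52) & 2^26-1,
             #       t3 = (m>>78) & 2^26-1,  t4 = (m>>104) | 1<<24   (the 2^128 pad bit)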
1665
1666         lea             0x90(%rsp),%rax         # size optimization
1667         vpaddq          $H2,$T2,$H2             # accumulate input
1668         sub             \$64,$len
1669         jz              .Ltail_avx2
1670         jmp             .Loop_avx2
1671
1672 .align  32
1673 .Loop_avx2:
1674         ################################################################
1675         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1676         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1677         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1678         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1679         #   \________/\________/
1680         ################################################################
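             # In scalar terms (a sketch, with m[i] denoting the i-th 16-byte
             # block plus its pad bit): every pass computes, for each of the
             # four positions j = 0..3 in a group,
             #       H[j] = (H[j] + m[4*k+j]) * r^4
             # and the final pass (.Ltail_avx2) multiplies position j by r^(4-j)
             # instead, so that the horizontal sum of the four lanes equals the
             # canonical Poly1305 accumulator over the whole message.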
1681         #vpaddq         $H2,$T2,$H2             # accumulate input
1682         vpaddq          $H0,$T0,$H0
1683         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1684         vpaddq          $H1,$T1,$H1
1685         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1686         vpaddq          $H3,$T3,$H3
1687         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1688         vpaddq          $H4,$T4,$H4
1689         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1690         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1691
1692         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1693         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1694         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1695         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1696         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1697         #
1698         # however, as h2 is "chronologically" the first one available, pull
1699         # the corresponding operations up, so the schedule becomes:
1700         #
1701         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1702         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1703         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1704         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1705         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
1706
1707         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1708         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1709         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1710         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1711         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1712
1713         vpmuludq        $H0,$T1,$T4             # h0*r1
1714         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1715         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1716         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1717         vpmuludq        $H3,$T1,$T4             # h3*r1
1718         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1719         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1720         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1721          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1722
1723         vpmuludq        $H0,$T0,$T4             # h0*r0
1724         vpmuludq        $H1,$T0,$H2             # h1*r0
1725         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1726         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1727         vpmuludq        $H3,$T0,$T4             # h3*r0
1728         vpmuludq        $H4,$T0,$H2             # h4*r0
1729          vmovdqu        16*0($inp),%x#$T0       # load input
1730         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1731         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1732          vinserti128    \$1,16*2($inp),$T0,$T0
1733
1734         vpmuludq        $H3,$T1,$T4             # h3*s2
1735         vpmuludq        $H4,$T1,$H2             # h4*s2
1736          vmovdqu        16*1($inp),%x#$T1
1737         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1738         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1739          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1740         vpmuludq        $H1,$T2,$T4             # h1*r2
1741         vpmuludq        $H0,$T2,$T2             # h0*r2
1742         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1743         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1744          vinserti128    \$1,16*3($inp),$T1,$T1
1745          lea            16*4($inp),$inp
1746
1747         vpmuludq        $H1,$H2,$T4             # h1*r3
1748         vpmuludq        $H0,$H2,$H2             # h0*r3
1749          vpsrldq        \$6,$T0,$T2             # splat input
1750         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1751         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1752         vpmuludq        $H3,$T3,$T4             # h3*s3
1753         vpmuludq        $H4,$T3,$H2             # h4*s3
1754          vpsrldq        \$6,$T1,$T3
1755         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1756         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1757          vpunpckhqdq    $T1,$T0,$T4             # 4
1758
1759         vpmuludq        $H3,$S4,$H3             # h3*s4
1760         vpmuludq        $H4,$S4,$H4             # h4*s4
1761          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1762         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1763         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1764          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1765         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1766         vpmuludq        $H1,$S4,$H0             # h1*s4
1767         vmovdqa         64(%rcx),$MASK          # .Lmask26
1768         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1769         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1770
1771         ################################################################
1772         # lazy reduction (interleaved with tail of input splat)
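             #
             # In scalar terms one carry step is simply: c = h3>>26; h3 &= 2^26-1;
             # h4 += c.  The h4 -> h0 step folds through the modulus (2^130 = 5),
             # so there: c = h4>>26; h4 &= 2^26-1; h0 += c + (c<<2), i.e. h0 += 5*c.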
1773
1774         vpsrlq          \$26,$H3,$D3
1775         vpand           $MASK,$H3,$H3
1776         vpaddq          $D3,$H4,$H4             # h3 -> h4
1777
1778         vpsrlq          \$26,$H0,$D0
1779         vpand           $MASK,$H0,$H0
1780         vpaddq          $D0,$D1,$H1             # h0 -> h1
1781
1782         vpsrlq          \$26,$H4,$D4
1783         vpand           $MASK,$H4,$H4
1784
1785          vpsrlq         \$4,$T3,$T2
1786
1787         vpsrlq          \$26,$H1,$D1
1788         vpand           $MASK,$H1,$H1
1789         vpaddq          $D1,$H2,$H2             # h1 -> h2
1790
1791         vpaddq          $D4,$H0,$H0
1792         vpsllq          \$2,$D4,$D4
1793         vpaddq          $D4,$H0,$H0             # h4 -> h0
1794
1795          vpand          $MASK,$T2,$T2           # 2
1796          vpsrlq         \$26,$T0,$T1
1797
1798         vpsrlq          \$26,$H2,$D2
1799         vpand           $MASK,$H2,$H2
1800         vpaddq          $D2,$H3,$H3             # h2 -> h3
1801
1802          vpaddq         $T2,$H2,$H2             # modulo-scheduled
1803          vpsrlq         \$30,$T3,$T3
1804
1805         vpsrlq          \$26,$H0,$D0
1806         vpand           $MASK,$H0,$H0
1807         vpaddq          $D0,$H1,$H1             # h0 -> h1
1808
1809          vpsrlq         \$40,$T4,$T4            # 4
1810
1811         vpsrlq          \$26,$H3,$D3
1812         vpand           $MASK,$H3,$H3
1813         vpaddq          $D3,$H4,$H4             # h3 -> h4
1814
1815          vpand          $MASK,$T0,$T0           # 0
1816          vpand          $MASK,$T1,$T1           # 1
1817          vpand          $MASK,$T3,$T3           # 3
1818          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
1819
1820         sub             \$64,$len
1821         jnz             .Loop_avx2
1822
1823         .byte           0x66,0x90               # 2-byte nop
1824 .Ltail_avx2:
1825         ################################################################
1826         # while the above multiplications were by r^4 in all lanes, in the
1827         # last iteration we multiply the least significant lane by r^4 and
1828         # the most significant one by r, so this is a copy of the above,
1829         # except that references to the precomputed table are displaced by 4...
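             # (The 4-byte displacement selects the neighbouring dword column of
             # the interleaved table expanded above, which holds the decreasing
             # powers r^4..r^1 distributed one per lane, matching the block each
             # lane carries, instead of r^4 in every lane.)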
1830
1831         #vpaddq         $H2,$T2,$H2             # accumulate input
1832         vpaddq          $H0,$T0,$H0
1833         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
1834         vpaddq          $H1,$T1,$H1
1835         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
1836         vpaddq          $H3,$T3,$H3
1837         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
1838         vpaddq          $H4,$T4,$H4
1839         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
1840         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
1841
1842         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1843         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1844         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1845         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1846         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1847
1848         vpmuludq        $H0,$T1,$T4             # h0*r1
1849         vpmuludq        $H1,$T1,$H2             # h1*r1
1850         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1851         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1852         vpmuludq        $H3,$T1,$T4             # h3*r1
1853         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
1854         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1855         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1856
1857         vpmuludq        $H0,$T0,$T4             # h0*r0
1858         vpmuludq        $H1,$T0,$H2             # h1*r0
1859         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1860          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
1861         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1862         vpmuludq        $H3,$T0,$T4             # h3*r0
1863         vpmuludq        $H4,$T0,$H2             # h4*r0
1864         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1865         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1866
1867         vpmuludq        $H3,$T1,$T4             # h3*s2
1868         vpmuludq        $H4,$T1,$H2             # h4*s2
1869         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1870         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1871          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
1872         vpmuludq        $H1,$T2,$T4             # h1*r2
1873         vpmuludq        $H0,$T2,$T2             # h0*r2
1874         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1875         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1876
1877         vpmuludq        $H1,$H2,$T4             # h1*r3
1878         vpmuludq        $H0,$H2,$H2             # h0*r3
1879         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1880         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1881         vpmuludq        $H3,$T3,$T4             # h3*s3
1882         vpmuludq        $H4,$T3,$H2             # h4*s3
1883         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1884         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1885
1886         vpmuludq        $H3,$S4,$H3             # h3*s4
1887         vpmuludq        $H4,$S4,$H4             # h4*s4
1888         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1889         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1890         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
1891         vpmuludq        $H1,$S4,$H0             # h1*s4
1892         vmovdqa         64(%rcx),$MASK          # .Lmask26
1893         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1894         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1895
1896         ################################################################
1897         # horizontal addition
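             #
             # Each ymm register holds four 64-bit lane sums; the vpsrldq/vpaddq
             # pairs add the odd 64-bit lanes into the even ones within each
             # 128-bit half, and the vpermq/vpaddq pairs then add the upper half
             # into the lower, leaving the total of all four lanes in the low
             # 64 bits of each register.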
1898
1899         vpsrldq         \$8,$D1,$T1
1900         vpsrldq         \$8,$H2,$T2
1901         vpsrldq         \$8,$H3,$T3
1902         vpsrldq         \$8,$H4,$T4
1903         vpsrldq         \$8,$H0,$T0
1904         vpaddq          $T1,$D1,$D1
1905         vpaddq          $T2,$H2,$H2
1906         vpaddq          $T3,$H3,$H3
1907         vpaddq          $T4,$H4,$H4
1908         vpaddq          $T0,$H0,$H0
1909
1910         vpermq          \$0x2,$H3,$T3
1911         vpermq          \$0x2,$H4,$T4
1912         vpermq          \$0x2,$H0,$T0
1913         vpermq          \$0x2,$D1,$T1
1914         vpermq          \$0x2,$H2,$T2
1915         vpaddq          $T3,$H3,$H3
1916         vpaddq          $T4,$H4,$H4
1917         vpaddq          $T0,$H0,$H0
1918         vpaddq          $T1,$D1,$D1
1919         vpaddq          $T2,$H2,$H2
1920
1921         ################################################################
1922         # lazy reduction
1923
1924         vpsrlq          \$26,$H3,$D3
1925         vpand           $MASK,$H3,$H3
1926         vpaddq          $D3,$H4,$H4             # h3 -> h4
1927
1928         vpsrlq          \$26,$H0,$D0
1929         vpand           $MASK,$H0,$H0
1930         vpaddq          $D0,$D1,$H1             # h0 -> h1
1931
1932         vpsrlq          \$26,$H4,$D4
1933         vpand           $MASK,$H4,$H4
1934
1935         vpsrlq          \$26,$H1,$D1
1936         vpand           $MASK,$H1,$H1
1937         vpaddq          $D1,$H2,$H2             # h1 -> h2
1938
1939         vpaddq          $D4,$H0,$H0
1940         vpsllq          \$2,$D4,$D4
1941         vpaddq          $D4,$H0,$H0             # h4 -> h0
1942
1943         vpsrlq          \$26,$H2,$D2
1944         vpand           $MASK,$H2,$H2
1945         vpaddq          $D2,$H3,$H3             # h2 -> h3
1946
1947         vpsrlq          \$26,$H0,$D0
1948         vpand           $MASK,$H0,$H0
1949         vpaddq          $D0,$H1,$H1             # h0 -> h1
1950
1951         vpsrlq          \$26,$H3,$D3
1952         vpand           $MASK,$H3,$H3
1953         vpaddq          $D3,$H4,$H4             # h3 -> h4
1954
1955         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
1956         vmovd           %x#$H1,`4*1-48-64`($ctx)
1957         vmovd           %x#$H2,`4*2-48-64`($ctx)
1958         vmovd           %x#$H3,`4*3-48-64`($ctx)
1959         vmovd           %x#$H4,`4*4-48-64`($ctx)
1960 ___
1961 $code.=<<___    if ($win64);
1962         vmovdqa         0x50(%r11),%xmm6
1963         vmovdqa         0x60(%r11),%xmm7
1964         vmovdqa         0x70(%r11),%xmm8
1965         vmovdqa         0x80(%r11),%xmm9
1966         vmovdqa         0x90(%r11),%xmm10
1967         vmovdqa         0xa0(%r11),%xmm11
1968         vmovdqa         0xb0(%r11),%xmm12
1969         vmovdqa         0xc0(%r11),%xmm13
1970         vmovdqa         0xd0(%r11),%xmm14
1971         vmovdqa         0xe0(%r11),%xmm15
1972         lea             0xf8(%r11),%rsp
1973 .Ldo_avx2_epilogue:
1974 ___
1975 $code.=<<___    if (!$win64);
1976         lea             8(%r11),%rsp
1977 ___
1978 $code.=<<___;
1979         vzeroupper
1980         ret
1981 .size   poly1305_blocks_avx2,.-poly1305_blocks_avx2
1982 ___
1983 }
1984 $code.=<<___;
1985 .align  64
1986 .Lconst:
1987 .Lmask24:
1988 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
1989 .L129:
1990 .long   1<<24,0,1<<24,0,1<<24,0,1<<24,0
1991 .Lmask26:
1992 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
1993 .Lfive:
1994 .long   5,0,5,0,5,0,5,0
1995 ___
1996 }
1997
1998 $code.=<<___;
1999 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2000 .align  16
2001 ___
2002
2003 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2004 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
2005 if ($win64) {
2006 $rec="%rcx";
2007 $frame="%rdx";
2008 $context="%r8";
2009 $disp="%r9";
2010
2011 $code.=<<___;
2012 .extern __imp_RtlVirtualUnwind
2013 .type   se_handler,\@abi-omnipotent
2014 .align  16
2015 se_handler:
2016         push    %rsi
2017         push    %rdi
2018         push    %rbx
2019         push    %rbp
2020         push    %r12
2021         push    %r13
2022         push    %r14
2023         push    %r15
2024         pushfq
2025         sub     \$64,%rsp
2026
2027         mov     120($context),%rax      # pull context->Rax
2028         mov     248($context),%rbx      # pull context->Rip
2029
2030         mov     8($disp),%rsi           # disp->ImageBase
2031         mov     56($disp),%r11          # disp->HandlerData
2032
2033         mov     0(%r11),%r10d           # HandlerData[0]
2034         lea     (%rsi,%r10),%r10        # prologue label
2035         cmp     %r10,%rbx               # context->Rip<.Lprologue
2036         jb      .Lcommon_seh_tail
2037
2038         mov     152($context),%rax      # pull context->Rsp
2039
2040         mov     4(%r11),%r10d           # HandlerData[1]
2041         lea     (%rsi,%r10),%r10        # epilogue label
2042         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
2043         jae     .Lcommon_seh_tail
2044
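             # Rip is between the prologue and epilogue labels: recover the six
             # GPRs pushed on entry from the 48 bytes below the pre-push stack
             # pointer and report that pointer back to the unwinder.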
2045         lea     48(%rax),%rax
2046
2047         mov     -8(%rax),%rbx
2048         mov     -16(%rax),%rbp
2049         mov     -24(%rax),%r12
2050         mov     -32(%rax),%r13
2051         mov     -40(%rax),%r14
2052         mov     -48(%rax),%r15
2053         mov     %rbx,144($context)      # restore context->Rbx
2054         mov     %rbp,160($context)      # restore context->Rbp
2055         mov     %r12,216($context)      # restore context->R12
2056         mov     %r13,224($context)      # restore context->R13
2057         mov     %r14,232($context)      # restore context->R14
2058         mov     %r15,240($context)      # restore context->R15
2059
2060         jmp     .Lcommon_seh_tail
2061 .size   se_handler,.-se_handler
2062
2063 .type   avx_handler,\@abi-omnipotent
2064 .align  16
2065 avx_handler:
2066         push    %rsi
2067         push    %rdi
2068         push    %rbx
2069         push    %rbp
2070         push    %r12
2071         push    %r13
2072         push    %r14
2073         push    %r15
2074         pushfq
2075         sub     \$64,%rsp
2076
2077         mov     120($context),%rax      # pull context->Rax
2078         mov     248($context),%rbx      # pull context->Rip
2079
2080         mov     8($disp),%rsi           # disp->ImageBase
2081         mov     56($disp),%r11          # disp->HandlerData
2082
2083         mov     0(%r11),%r10d           # HandlerData[0]
2084         lea     (%rsi,%r10),%r10        # prologue label
2085         cmp     %r10,%rbx               # context->Rip<prologue label
2086         jb      .Lcommon_seh_tail
2087
2088         mov     152($context),%rax      # pull context->Rsp
2089
2090         mov     4(%r11),%r10d           # HandlerData[1]
2091         lea     (%rsi,%r10),%r10        # epilogue label
2092         cmp     %r10,%rbx               # context->Rip>=epilogue label
2093         jae     .Lcommon_seh_tail
2094
2095         mov     208($context),%rax      # pull context->R11
2096
2097         lea     0x50(%rax),%rsi
2098         lea     0xf8(%rax),%rax
2099         lea     512($context),%rdi      # &context.Xmm6
2100         mov     \$20,%ecx                # 20 quadwords = xmm6..xmm15
2101         .long   0xa548f3fc              # cld; rep movsq
2102
2103 .Lcommon_seh_tail:
2104         mov     8(%rax),%rdi
2105         mov     16(%rax),%rsi
2106         mov     %rax,152($context)      # restore context->Rsp
2107         mov     %rsi,168($context)      # restore context->Rsi
2108         mov     %rdi,176($context)      # restore context->Rdi
2109
2110         mov     40($disp),%rdi          # disp->ContextRecord
2111         mov     $context,%rsi           # context
2112         mov     \$154,%ecx              # sizeof(CONTEXT)
2113         .long   0xa548f3fc              # cld; rep movsq
2114
2115         mov     $disp,%rsi
2116         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
2117         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
2118         mov     0(%rsi),%r8             # arg3, disp->ControlPc
2119         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
2120         mov     40(%rsi),%r10           # disp->ContextRecord
2121         lea     56(%rsi),%r11           # &disp->HandlerData
2122         lea     24(%rsi),%r12           # &disp->EstablisherFrame
2123         mov     %r10,32(%rsp)           # arg5
2124         mov     %r11,40(%rsp)           # arg6
2125         mov     %r12,48(%rsp)           # arg7
2126         mov     %rcx,56(%rsp)           # arg8, (NULL)
2127         call    *__imp_RtlVirtualUnwind(%rip)
2128
2129         mov     \$1,%eax                # ExceptionContinueSearch
2130         add     \$64,%rsp
2131         popfq
2132         pop     %r15
2133         pop     %r14
2134         pop     %r13
2135         pop     %r12
2136         pop     %rbp
2137         pop     %rbx
2138         pop     %rdi
2139         pop     %rsi
2140         ret
2141 .size   avx_handler,.-avx_handler
2142
2143 .section        .pdata
2144 .align  4
2145         .rva    .LSEH_begin_poly1305_init
2146         .rva    .LSEH_end_poly1305_init
2147         .rva    .LSEH_info_poly1305_init
2148
2149         .rva    .LSEH_begin_poly1305_blocks
2150         .rva    .LSEH_end_poly1305_blocks
2151         .rva    .LSEH_info_poly1305_blocks
2152
2153         .rva    .LSEH_begin_poly1305_emit
2154         .rva    .LSEH_end_poly1305_emit
2155         .rva    .LSEH_info_poly1305_emit
2156 ___
2157 $code.=<<___ if ($avx);
2158         .rva    .LSEH_begin_poly1305_blocks_avx
2159         .rva    .Lbase2_64_avx
2160         .rva    .LSEH_info_poly1305_blocks_avx_1
2161
2162         .rva    .Lbase2_64_avx
2163         .rva    .Leven_avx
2164         .rva    .LSEH_info_poly1305_blocks_avx_2
2165
2166         .rva    .Leven_avx
2167         .rva    .LSEH_end_poly1305_blocks_avx
2168         .rva    .LSEH_info_poly1305_blocks_avx_3
2169
2170         .rva    .LSEH_begin_poly1305_emit_avx
2171         .rva    .LSEH_end_poly1305_emit_avx
2172         .rva    .LSEH_info_poly1305_emit_avx
2173 ___
2174 $code.=<<___ if ($avx>1);
2175         .rva    .LSEH_begin_poly1305_blocks_avx2
2176         .rva    .Lbase2_64_avx2
2177         .rva    .LSEH_info_poly1305_blocks_avx2_1
2178
2179         .rva    .Lbase2_64_avx2
2180         .rva    .Leven_avx2
2181         .rva    .LSEH_info_poly1305_blocks_avx2_2
2182
2183         .rva    .Leven_avx2
2184         .rva    .LSEH_end_poly1305_blocks_avx2
2185         .rva    .LSEH_info_poly1305_blocks_avx2_3
2186 ___
2187 $code.=<<___;
2188 .section        .xdata
2189 .align  8
2190 .LSEH_info_poly1305_init:
2191         .byte   9,0,0,0
2192         .rva    se_handler
2193         .rva    .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
2194
2195 .LSEH_info_poly1305_blocks:
2196         .byte   9,0,0,0
2197         .rva    se_handler
2198         .rva    .Lblocks_body,.Lblocks_epilogue
2199
2200 .LSEH_info_poly1305_emit:
2201         .byte   9,0,0,0
2202         .rva    se_handler
2203         .rva    .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
2204 ___
2205 $code.=<<___ if ($avx);
2206 .LSEH_info_poly1305_blocks_avx_1:
2207         .byte   9,0,0,0
2208         .rva    se_handler
2209         .rva    .Lblocks_avx_body,.Lblocks_avx_epilogue         # HandlerData[]
2210
2211 .LSEH_info_poly1305_blocks_avx_2:
2212         .byte   9,0,0,0
2213         .rva    se_handler
2214         .rva    .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue     # HandlerData[]
2215
2216 .LSEH_info_poly1305_blocks_avx_3:
2217         .byte   9,0,0,0
2218         .rva    avx_handler
2219         .rva    .Ldo_avx_body,.Ldo_avx_epilogue                 # HandlerData[]
2220
2221 .LSEH_info_poly1305_emit_avx:
2222         .byte   9,0,0,0
2223         .rva    se_handler
2224         .rva    .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
2225 ___
2226 $code.=<<___ if ($avx>1);
2227 .LSEH_info_poly1305_blocks_avx2_1:
2228         .byte   9,0,0,0
2229         .rva    se_handler
2230         .rva    .Lblocks_avx2_body,.Lblocks_avx2_epilogue       # HandlerData[]
2231
2232 .LSEH_info_poly1305_blocks_avx2_2:
2233         .byte   9,0,0,0
2234         .rva    se_handler
2235         .rva    .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue   # HandlerData[]
2236
2237 .LSEH_info_poly1305_blocks_avx2_3:
2238         .byte   9,0,0,0
2239         .rva    avx_handler
2240         .rva    .Ldo_avx2_body,.Ldo_avx2_epilogue               # HandlerData[]
2241 ___
2242 }
2243
2244 foreach (split('\n',$code)) {
2245         s/\`([^\`]*)\`/eval($1)/ge;
2246         s/%r([a-z]+)#d/%e$1/g;
2247         s/%r([0-9]+)#d/%r$1d/g;
2248         s/%x#%y/%x/g;
2249
2250         print $_,"\n";
2251 }
2252 close STDOUT;